| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251 |
- """
- Configuration Module
- Centralized configuration for paths, constants, and settings.
- """
- import os
- from pathlib import Path
- from typing import Dict, Tuple
# ==================== PATHS ====================
# Base paths
PROJECT_ROOT = Path(__file__).parent.parent  # Points to dudong-v2/

# Model paths — every model artifact is anchored on MODELS_DIR so the
# model directory can be relocated by changing a single constant
# (previously "model_files" was re-spelled per path).
MODELS_DIR = PROJECT_ROOT / "model_files"
AUDIO_MODEL_PATH = MODELS_DIR / "audio"  # directory containing the audio model files
DEFECT_MODEL_PATH = MODELS_DIR / "best.pt"
LOCULE_MODEL_PATH = MODELS_DIR / "locule.pt"
MATURITY_MODEL_PATH = MODELS_DIR / "multispectral" / "maturity" / "final_model.pt"

# Image paths
IMAGES_DIR = PROJECT_ROOT / "assets" / "logos"

# Test data paths (optional - can be removed if not needed)
UNSEEN_DIR = PROJECT_ROOT / "unseen"
UNSEEN_AUDIO_UNRIPE = UNSEEN_DIR / "unripe"
UNSEEN_AUDIO_MIDRIPE = UNSEEN_DIR / "midripe"
UNSEEN_QUALITY = UNSEEN_DIR / "quality"

# Data storage paths
DATA_DIR = PROJECT_ROOT / "data"
DATABASE_PATH = DATA_DIR / "database.db"
ANALYSES_DIR = DATA_DIR / "analyses"
# ==================== MODEL SETTINGS ====================
# Device configuration
DEVICE_PRIORITY = ["cuda", "cpu"]  # Try CUDA first, fallback to CPU

# Model versions (for display)
# NOTE(review): all versions are currently blank strings — the UI presumably
# shows these verbatim; confirm whether they should be populated at release.
MODEL_VERSIONS = {
    "ripeness": "",
    "quality": "",
    "defect": "",
    "maturity": "",
}

# Audio model settings
AUDIO_SAMPLE_RATE = 16000      # Hz — expected sample rate of input audio
AUDIO_DESIRED_SAMPLES = 16000  # samples per clip (1 second at 16 kHz)
AUDIO_FRAME_LENGTH = 255       # frame length in samples — presumably the STFT window for the spectrogram; confirm in the audio pipeline
AUDIO_FRAME_STEP = 128         # hop size in samples between frames

# YOLO model settings
YOLO_CONFIDENCE_THRESHOLD = 0.2  # detections below this score are discarded
YOLO_IMAGE_SIZE = 640            # inference input size (pixels, square)

# Maturity model settings
MATURITY_MASK_BAND_INDEX = 4  # Band index for masking (860nm)
MATURITY_IMG_SIZE = 256  # Target image size after preprocessing
MATURITY_IMG_PAD = 8  # Padding for cropping
# ==================== CLASS DEFINITIONS ====================
# Ripeness classes (matching the three-class model from notebook)
# Index order must match the audio model's output class order.
RIPENESS_CLASSES = ["unripe", "ripe", "overripe"]

# Defect detection classes and colors (BGR format for OpenCV)
DEFECT_CLASS_COLORS: Dict[int, Tuple[int, int, int]] = {
    0: (255, 34, 134),  # Minor defects - Pink/Magenta
    1: (0, 252, 199),   # No defects - Cyan/Turquoise
    2: (86, 0, 254),    # Reject - Purple
}
# Human-readable label per defect class index; keys mirror DEFECT_CLASS_COLORS.
DEFECT_CLASS_NAMES = {
    0: "Minor Defects",
    1: "No Defects",
    2: "Reject",
}

# Locule segmentation colors (BGR format) - ROYGBIV
# One color per detected locule instance, cycled in this order.
LOCULE_COLORS: list[Tuple[int, int, int]] = [
    (0, 0, 255),    # Red
    (0, 165, 255),  # Orange
    (0, 255, 255),  # Yellow
    (0, 255, 0),    # Green
    (255, 0, 0),    # Blue
    (130, 0, 75),   # Indigo
    (211, 0, 148),  # Violet
]
# ==================== UI SETTINGS ====================
# Window settings (pixels; sized for a 1080p display)
WINDOW_TITLE = "DuDONG Grading System"
WINDOW_WIDTH = 1920
WINDOW_HEIGHT = 1080
DEVICE_ID = "MAIN-001"  # identifier shown for this grading station

# UI Colors (for PyQt styling) — hex color strings used in stylesheets
UI_COLORS = {
    # Primary colors
    "primary_dark": "#2c3e50",
    "primary_light": "#34495e",
    "accent_blue": "#3498db",
    "accent_green": "#27ae60",

    # Status colors
    "online": "#27ae60",
    "offline": "#e74c3c",
    "updating": "#f39c12",

    # Background colors
    "bg_light": "#f8f9fa",
    "bg_white": "#ffffff",
    "bg_panel": "#ecf0f1",

    # Text colors
    "text_dark": "#2c3e50",
    "text_medium": "#7f8c8d",
    "text_light": "#bdc3c7",

    # Button colors (each base color has a matching hover shade)
    "btn_green": "#27ae60",
    "btn_green_hover": "#229954",
    "btn_blue": "#3498db",
    "btn_blue_hover": "#2980b9",
    "btn_orange": "#f39c12",
    "btn_orange_hover": "#e67e22",
    "btn_purple": "#9b59b6",
    "btn_purple_hover": "#8e44ad",
    "btn_red": "#e74c3c",
    "btn_red_hover": "#c0392b",

    # Grade colors
    "grade_a": "#27ae60",
    "grade_b": "#3498db",
    "grade_c": "#e74c3c",
}

# Status indicator sizes (pixels; radius should be half the size for a circle)
STATUS_INDICATOR_SIZE = 12
STATUS_INDICATOR_RADIUS = 6

# Table settings
TABLE_ROW_HEIGHT = 45            # pixels per row
TABLE_RECENT_RESULTS_COUNT = 5   # rows shown in the "recent results" view
TABLE_MAX_RESULTS_MEMORY = 100   # cap on results kept in memory

# Feed display sizes (pixels)
FEED_MIN_WIDTH = 150
FEED_MIN_HEIGHT = 110

# Image display sizes (pixels)
RESULT_IMAGE_WIDTH = 621
RESULT_IMAGE_HEIGHT = 441
# ==================== THREADING SETTINGS ====================
# Thread pool settings
MAX_THREAD_COUNT = None  # None = use default (CPU count)

# ==================== PERFORMANCE SETTINGS ====================
# Spectrogram figure size (matplotlib convention: (width, height) in inches)
SPECTROGRAM_FIG_SIZE = (8, 1.9)
SPECTROGRAM_DPI = 100

# ==================== LOGGING SETTINGS ====================
# Log level name and format string for the stdlib logging module
LOG_LEVEL = "INFO"
LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

# ==================== FILE DIALOG FILTERS ====================
# Qt-style file dialog filter strings (";;"-separated alternatives)
FILE_FILTERS = {
    "audio": "Audio Files (*.wav *.mp3 *.flac *.ogg *.m4a *.aac *.wma);;WAV Files (*.wav);;MP3 Files (*.mp3);;FLAC Files (*.flac);;OGG Files (*.ogg);;M4A Files (*.m4a);;AAC Files (*.aac);;WMA Files (*.wma);;All Files (*.*)",
    "image": "Image Files (*.jpg *.jpeg *.png *.JPG *.JPEG *.PNG)",
    "tiff": "TIFF Files (*.tif *.tiff *.TIF *.TIFF);;All Files (*.*)",
    "all_media": "All Files (*.*)",
}

# Default directories for file dialogs (stringified because Qt expects str)
DEFAULT_DIRS = {
    "audio": str(UNSEEN_DIR),
    "image": str(PROJECT_ROOT),
}
# ==================== HELPER FUNCTIONS ====================
def get_device(priority=None) -> str:
    """
    Get the best available device for model inference.

    Walks the priority list in order and returns the first device that is
    actually usable. Fix over the previous version: a "cpu" entry is now
    honored at its position in the list instead of being skipped (before,
    only "cuda" was ever matched inside the loop, so a priority of
    ["cpu", "cuda"] could still return "cuda").

    Args:
        priority: Optional ordered list of device names to try; defaults
            to the module-level DEVICE_PRIORITY.

    Returns:
        str: Device name ('cuda' or 'cpu')
    """
    # Imported lazily so merely importing this config module does not
    # pay the torch import cost.
    import torch

    if priority is None:
        priority = DEVICE_PRIORITY

    for device in priority:
        if device == "cpu":
            return "cpu"
        if device == "cuda" and torch.cuda.is_available():
            return "cuda"
    # Nothing in the list was usable — CPU always works.
    return "cpu"
def get_gpu_info() -> str:
    """
    Get GPU information for display.

    Returns:
        str: Human-readable GPU description, or a fallback message when
        CUDA is unavailable.
    """
    # Lazy import keeps torch out of the module-import path.
    import torch

    if not torch.cuda.is_available():
        return "GPU: Not Available (Using CPU)"
    return f"GPU: {torch.cuda.get_device_name(0)}"
def ensure_paths_exist(directories=None) -> None:
    """Create necessary directories if they don't exist.

    Generalized to accept an explicit directory list so callers (and tests)
    can create arbitrary path sets; the default reproduces the original
    behavior of creating the module's standard data/asset directories.

    Args:
        directories: Optional iterable of Path objects to create. Defaults
            to IMAGES_DIR, UNSEEN_DIR, DATA_DIR and ANALYSES_DIR.
    """
    if directories is None:
        directories = [IMAGES_DIR, UNSEEN_DIR, DATA_DIR, ANALYSES_DIR]
    for directory in directories:
        # parents=True creates intermediate dirs; exist_ok makes this idempotent
        directory.mkdir(parents=True, exist_ok=True)
def validate_model_paths() -> Dict[str, bool]:
    """
    Check if all required model files exist.

    Returns:
        Dict[str, bool]: Dictionary of model availability
    """
    model_paths = {
        "audio": AUDIO_MODEL_PATH,
        "defect": DEFECT_MODEL_PATH,
        "locule": LOCULE_MODEL_PATH,
        "maturity": MATURITY_MODEL_PATH,
    }
    # NOTE(review): AUDIO_MODEL_PATH is a directory while the others are
    # files; .exists() covers both, so behavior is unchanged.
    return {name: path.exists() for name, path in model_paths.items()}
# ==================== INITIALIZATION ====================
# Import-time side effect: create the data/asset directories up front so
# later writes (database, analyses) don't fail on a fresh checkout.
ensure_paths_exist()
|