o
    Wh                     @   s\   d dl m Z  d dlmZ d dlmZ d dlmZmZ d dlm	Z	m
Z
 G dd dejjZdS )	    )copy)yolo)SegmentationModel)DEFAULT_CFGRANK)plot_imagesplot_resultsc                       sJ   e Zd ZdZeddf fdd	ZdddZdd	 Zd
d Zdd Z	  Z
S )SegmentationTrainera  
    A class extending the DetectionTrainer class for training based on a segmentation model.

    This trainer specializes in handling segmentation tasks, extending the detection trainer with segmentation-specific
    functionality including model initialization, validation, and visualization.

    Attributes:
        loss_names (Tuple[str]): Names of the loss components used during training.

    Examples:
        >>> from ultralytics.models.yolo.segment import SegmentationTrainer
        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
        >>> trainer = SegmentationTrainer(overrides=args)
        >>> trainer.train()
    Nc                    s(   |du ri }d|d< t  ||| dS )a}  
        Initialize a SegmentationTrainer object.

        This initializes a trainer for segmentation tasks, extending the detection trainer with segmentation-specific
        functionality. It sets the task to 'segment' and prepares the trainer for training segmentation models.

        Args:
            cfg (dict): Configuration dictionary with default training settings. Defaults to DEFAULT_CFG.
            overrides (dict, optional): Dictionary of parameter overrides for the default configuration.
            _callbacks (list, optional): List of callback functions to be executed during training.

        Examples:
            >>> from ultralytics.models.yolo.segment import SegmentationTrainer
            >>> args = dict(model="yolov8n-seg.pt", data="coco8-seg.yaml", epochs=3)
            >>> trainer = SegmentationTrainer(overrides=args)
            >>> trainer.train()
        Nsegmenttask)super__init__)selfcfg	overrides
_callbacks	__class__ Y/var/www/vscode/kcb/lib/python3.10/site-packages/ultralytics/models/yolo/segment/train.pyr      s   zSegmentationTrainer.__init__Tc                 C   s6   t || jd | jd |otdkd}|r|| |S )a  
        Initialize and return a SegmentationModel with specified configuration and weights.

        Args:
            cfg (dict | str | None): Model configuration. Can be a dictionary, a path to a YAML file, or None.
            weights (str | Path | None): Path to pretrained weights file.
            verbose (bool): Whether to display model information during initialization.

        Returns:
            (SegmentationModel): Initialized segmentation model with loaded weights if specified.

        Examples:
            >>> trainer = SegmentationTrainer()
            >>> model = trainer.get_model(cfg="yolov8n-seg.yaml")
            >>> model = trainer.get_model(weights="yolov8n-seg.pt", verbose=False)
        ncchannels)r   chverbose)r   datar   load)r   r   weightsr   modelr   r   r   	get_model3   s   $
zSegmentationTrainer.get_modelc                 C   s&   d| _ tjj| j| jt| j| jdS )zIReturn an instance of SegmentationValidator for validation of YOLO model.)box_lossseg_losscls_lossdfl_loss)save_dirargsr   )	
loss_namesr   r
   SegmentationValidatortest_loaderr$   r   r%   	callbacksr   r   r   r   get_validatorJ   s   z!SegmentationTrainer.get_validatorc                 C   sJ   t |d |d |d d|d |d |d | jd| d	 | jd
 dS )a#  
        Plot training sample images with labels, bounding boxes, and masks.

        This method creates a visualization of training batch images with their corresponding labels, bounding boxes,
        and segmentation masks, saving the result to a file for inspection and debugging.

        Args:
            batch (dict): Dictionary containing batch data with the following keys:
                'img': Images tensor
                'batch_idx': Batch indices for each box
                'cls': Class labels tensor (squeezed to remove last dimension)
                'bboxes': Bounding box coordinates tensor
                'masks': Segmentation masks tensor
                'im_file': List of image file paths
            ni (int): Current training iteration number, used for naming the output file.

        Examples:
            >>> trainer = SegmentationTrainer()
            >>> batch = {
            ...     "img": torch.rand(16, 3, 640, 640),
            ...     "batch_idx": torch.zeros(16),
            ...     "cls": torch.randint(0, 80, (16, 1)),
            ...     "bboxes": torch.rand(16, 4),
            ...     "masks": torch.rand(16, 640, 640),
            ...     "im_file": ["image1.jpg", "image2.jpg"],
            ... }
            >>> trainer.plot_training_samples(batch, ni=5)
        img	batch_idxclsr   bboxesmasksim_filetrain_batchz.jpg)r0   pathsfnameon_plotN)r   squeezer$   r5   )r   batchnir   r   r   plot_training_samplesQ   s   
z)SegmentationTrainer.plot_training_samplesc                 C   s   t | jd| jd dS )zPlots training/val metrics.T)filer
   r5   N)r   csvr5   r*   r   r   r   plot_metricsy   s   z SegmentationTrainer.plot_metrics)NNT)__name__
__module____qualname____doc__r   r   r   r+   r9   r<   __classcell__r   r   r   r   r	      s    
(r	   N)r   ultralytics.modelsr   ultralytics.nn.tasksr   ultralytics.utilsr   r   ultralytics.utils.plottingr   r   detectDetectionTrainerr	   r   r   r   r   <module>   s   