from collections.abc import Callable
from types import SimpleNamespace
from typing import Any, List, Optional

import cv2
import numpy as np

from ultralytics.utils import LOGGER, RANK, SETTINGS, TESTS_RUNNING, ops
from ultralytics.utils.metrics import ClassifyMetrics, DetMetrics, OBBMetrics, PoseMetrics, SegmentMetrics

try:
    assert not TESTS_RUNNING  # do not log pytest runs
    assert SETTINGS["comet"] is True  # verify the Comet integration is enabled
    import comet_ml

    assert hasattr(comet_ml, "__version__")  # verify the package is not a local directory

    import os
    from pathlib import Path

    # Ensures certain logging functions only run for supported tasks
    COMET_SUPPORTED_TASKS = ["detect", "segment"]

    # Names of plots created by Ultralytics that are logged to Comet
    CONFUSION_MATRIX_PLOT_NAMES = "confusion_matrix", "confusion_matrix_normalized"
    EVALUATION_PLOT_NAMES = "F1_curve", "P_curve", "R_curve", "PR_curve"
    LABEL_PLOT_NAMES = "labels", "labels_correlogram"
    SEGMENT_METRICS_PLOT_PREFIX = "Box", "Mask"
    POSE_METRICS_PLOT_PREFIX = "Box", "Pose"

    _comet_image_prediction_count = 0

except (ImportError, AssertionError):
    comet_ml = None
def _get_comet_mode() -> str:
    """Returns the mode of comet set in the environment variables, defaults to 'online' if not set."""
    comet_mode = os.getenv("COMET_MODE")
    if comet_mode is not None:
        LOGGER.warning(
            "The COMET_MODE environment variable is deprecated. "
            "Please use COMET_START_ONLINE to set the Comet experiment mode. "
            "To start an offline Comet experiment, use 'export COMET_START_ONLINE=0'. "
            "If COMET_START_ONLINE is not set or is set to '1', an online Comet experiment will be created."
        )
        return comet_mode
    return "online"


def _get_comet_model_name() -> str:
    """Returns the model name for Comet from the environment variable COMET_MODEL_NAME or defaults to 'Ultralytics'."""
    return os.getenv("COMET_MODEL_NAME", "Ultralytics")


def _get_eval_batch_logging_interval() -> int:
    """Get the evaluation batch logging interval from environment variable or use default value 1."""
    return int(os.getenv("COMET_EVAL_BATCH_LOGGING_INTERVAL", 1))


def _get_max_image_predictions_to_log() -> int:
    """Get the maximum number of image predictions to log from the environment variables."""
    return int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100))


def _scale_confidence_score(score: float) -> float:
    """Scales the given confidence score by a factor specified in an environment variable."""
    scale = float(os.getenv("COMET_MAX_CONFIDENCE_SCORE", 100.0))
    return score * scale


def _should_log_confusion_matrix() -> bool:
    """Determines if the confusion matrix should be logged based on the environment variable settings."""
    return os.getenv("COMET_EVAL_LOG_CONFUSION_MATRIX", "false").lower() == "true"


def _should_log_image_predictions() -> bool:
    """Determines whether to log image predictions based on a specified environment variable."""
    return os.getenv("COMET_EVAL_LOG_IMAGE_PREDICTIONS", "true").lower() == "true"
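# Configuration example (illustrative, not part of the original module): the helpers above read
# these environment variables, so the integration can be tuned before launching training, e.g.:
#
#   export COMET_MODEL_NAME="Ultralytics"            # name used when logging the best checkpoint
#   export COMET_START_ONLINE=0                      # create an offline Comet experiment
#   export COMET_EVAL_BATCH_LOGGING_INTERVAL=2       # log every 2nd validation batch
#   export COMET_MAX_IMAGE_PREDICTIONS=50            # cap the number of logged prediction images
#   export COMET_EVAL_LOG_CONFUSION_MATRIX=true      # also log confusion matrices during eval
#   export COMET_EVAL_LOG_IMAGE_PREDICTIONS=true     # log per-image predictions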
def _resume_or_create_experiment(args: SimpleNamespace) -> None:
    """
    Resumes CometML experiment or creates a new experiment based on args.

    Ensures that the experiment object is only created in a single process during distributed training.
    """
    if RANK not in {-1, 0}:
        return

    # Respect an explicit COMET_START_ONLINE setting; otherwise derive it from the deprecated COMET_MODE
    if os.getenv("COMET_START_ONLINE") is None:
        comet_mode = _get_comet_mode()
        os.environ["COMET_START_ONLINE"] = "1" if comet_mode != "offline" else "0"

    try:
        _project_name = os.getenv("COMET_PROJECT_NAME", args.project)
        experiment = comet_ml.start(project_name=_project_name)
        experiment.log_parameters(vars(args))
        experiment.log_others(
            {
                "eval_batch_logging_interval": _get_eval_batch_logging_interval(),
                "log_confusion_matrix_on_eval": _should_log_confusion_matrix(),
                "log_image_predictions": _should_log_image_predictions(),
                "max_image_predictions": _get_max_image_predictions_to_log(),
            }
        )
        experiment.log_other("Created from", "ultralytics")
    except Exception as e:
        LOGGER.warning(f"Comet installed but not initialized correctly, not logging this run. {e}")
def _fetch_trainer_metadata(trainer) -> dict:
    """Returns metadata for YOLO training including epoch and asset saving status."""
    curr_epoch = trainer.epoch + 1

    train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size
    curr_step = curr_epoch * train_num_steps_per_epoch
    final_epoch = curr_epoch == trainer.epochs

    save = trainer.args.save
    save_period = trainer.args.save_period
    save_interval = curr_epoch % save_period == 0
    save_assets = save and save_period > 0 and save_interval and not final_epoch

    return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch)
def _scale_bounding_box_to_original_image_shape(
    box, resized_image_shape, original_image_shape, ratio_pad
) -> List[float]:
    """
    YOLO resizes images during training and the label values are normalized based on this resized shape.

    This function rescales the bounding box labels to the original image shape.
    """
    resized_image_height, resized_image_width = resized_image_shape

    # Convert normalized xywh format predictions to xyxy in resized scale format
    box = ops.xywhn2xyxy(box, h=resized_image_height, w=resized_image_width)
    # Scale box predictions from resized image scale back to original image scale
    box = ops.scale_boxes(resized_image_shape, box, original_image_shape, ratio_pad)
    # Convert bounding box format from xyxy to xywh for Comet logging
    box = ops.xyxy2xywh(box)
    # Adjust xy center to correspond to the top-left corner
    box[:2] -= box[2:] / 2
    box = box.tolist()

    return box
def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> Optional[dict]:
    """
    Format ground truth annotations for object detection.

    This function processes ground truth annotations from a batch of images for object detection tasks. It extracts
    bounding boxes, class labels, and other metadata for a specific image in the batch, and formats them for
    visualization or evaluation.

    Args:
        img_idx (int): Index of the image in the batch to process.
        image_path (str | Path): Path to the image file.
        batch (dict): Batch dictionary containing detection data with keys:
            - 'batch_idx': Tensor of batch indices
            - 'bboxes': Tensor of bounding boxes in normalized xywh format
            - 'cls': Tensor of class labels
            - 'ori_shape': Original image shapes
            - 'resized_shape': Resized image shapes
            - 'ratio_pad': Ratio and padding information
        class_name_map (dict | None, optional): Mapping from class indices to class names.

    Returns:
        (dict | None): Formatted ground truth annotations with the following structure:
            - 'boxes': List of box coordinates [x, y, width, height]
            - 'label': Label string with format "gt_{class_name}"
            - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score)
            Returns None if no bounding boxes are found for the image.
    """
    indices = batch["batch_idx"] == img_idx
    bboxes = batch["bboxes"][indices]
    if len(bboxes) == 0:
        LOGGER.debug(f"Comet Image: {image_path} has no bounding boxes labels")
        return None

    cls_labels = batch["cls"][indices].squeeze(1).tolist()
    if class_name_map:
        cls_labels = [str(class_name_map[label]) for label in cls_labels]

    original_image_shape = batch["ori_shape"][img_idx]
    resized_image_shape = batch["resized_shape"][img_idx]
    ratio_pad = batch["ratio_pad"][img_idx]

    data = []
    for box, label in zip(bboxes, cls_labels):
        box = _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad)
        data.append(
            {
                "boxes": [box],
                "label": f"gt_{label}",
                "score": _scale_confidence_score(1.0),
            }
        )

    return {"name": "ground_truth", "data": data}


def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> Optional[dict]:
    """Format YOLO predictions for object detection visualization."""
    stem = image_path.stem
    image_id = int(stem) if stem.isnumeric() else stem

    predictions = metadata.get(image_id)
    if not predictions:
        LOGGER.debug(f"Comet Image: {image_path} has no bounding boxes predictions")
        return None

    # Apply the class mapping used when the prediction JSON was created
    if class_label_map and class_map:
        class_label_map = {class_map[k]: v for k, v in class_label_map.items()}

    try:
        from pycocotools.mask import decode  # noqa
    except ImportError:
        decode = None

    data = []
    for prediction in predictions:
        boxes = prediction["bbox"]
        score = _scale_confidence_score(prediction["score"])
        cls_label = prediction["category_id"]
        if class_label_map:
            cls_label = str(class_label_map[cls_label])

        annotation_data = {"boxes": [boxes], "label": cls_label, "score": score}

        if decode is not None:
            # Run segmentation processing only if the compressed masks can be decoded
            segments = prediction.get("segmentation", None)
            if segments is not None:
                segments = _extract_segmentation_annotation(segments, decode)
                if segments is not None:
                    annotation_data["points"] = segments

        data.append(annotation_data)

    return {"name": "prediction", "data": data}
def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> Optional[List[List[Any]]]:
    """
    Extracts segmentation annotation from compressed segmentations as list of polygons.

    Args:
        segmentation_raw: Raw segmentation data in compressed format.
        decode: Function to decode the compressed segmentation data.

    Returns:
        (Optional[List[List[Any]]]): List of polygon points or None if extraction fails.
    """
    try:
        mask = decode(segmentation_raw)
        contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        annotations = [np.array(polygon).squeeze() for polygon in contours if len(polygon) >= 3]
        return [annotation.ravel().tolist() for annotation in annotations]
    except Exception as e:
        LOGGER.warning(f"Comet Failed to extract segmentation annotation: {e}")
    return None


def _fetch_annotations(
    img_idx, image_path, batch, prediction_metadata_map, class_label_map, class_map
) -> Optional[List]:
    """Join the ground truth and prediction annotations if they exist."""
    ground_truth_annotations = _format_ground_truth_annotations_for_detection(
        img_idx, image_path, batch, class_label_map
    )
    prediction_annotations = _format_prediction_annotations(
        image_path, prediction_metadata_map, class_label_map, class_map
    )

    annotations = [
        annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None
    ]
    return [annotations] if annotations else None
def _create_prediction_metadata_map(model_predictions) -> dict:
    """Create metadata map for model predictions by grouping them based on image ID."""
    pred_metadata_map = {}
    for prediction in model_predictions:
        pred_metadata_map.setdefault(prediction["image_id"], [])
        pred_metadata_map[prediction["image_id"]].append(prediction)

    return pred_metadata_map


def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
    """Log the confusion matrix to Comet experiment."""
    conf_mat = trainer.validator.confusion_matrix.matrix
    names = list(trainer.data["names"].values()) + ["background"]
    experiment.log_confusion_matrix(
        matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
    )
def _log_images(experiment, image_paths, curr_step, annotations=None) -> None:
    """
    Log images to the experiment with optional annotations.

    This function logs images to a Comet ML experiment, optionally including annotation data for visualization
    such as bounding boxes or segmentation masks.

    Args:
        experiment (comet_ml.Experiment): The Comet ML experiment to log images to.
        image_paths (List[Path]): List of paths to images that will be logged.
        curr_step (int): Current training step/iteration for tracking in the experiment timeline.
        annotations (List[List[dict]], optional): Nested list of annotation dictionaries for each image. Each
            annotation contains visualization data like bounding boxes, labels, and confidence scores.

    Returns:
        None
    """
    if annotations:
        for image_path, annotation in zip(image_paths, annotations):
            experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation)
    else:
        for image_path in image_paths:
            experiment.log_image(image_path, name=image_path.stem, step=curr_step)
def _log_image_predictions(experiment, validator, curr_step) -> None:
    """
    Log predicted boxes for a single image during training.

    This function logs image predictions to a Comet ML experiment during model validation. It processes
    validation data and formats both ground truth and prediction annotations for visualization in the Comet
    dashboard. The function respects configured limits on the number of images to log.

    Args:
        experiment (comet_ml.Experiment): The Comet ML experiment to log to.
        validator (BaseValidator): The validator instance containing validation data and predictions.
        curr_step (int): The current training step for logging timeline.

    Notes:
        This function uses global state to track the number of logged predictions across calls.
        It only logs predictions for supported tasks defined in COMET_SUPPORTED_TASKS.
        The number of logged images is limited by the COMET_MAX_IMAGE_PREDICTIONS environment variable.
    """
    global _comet_image_prediction_count

    task = validator.args.task
    if task not in COMET_SUPPORTED_TASKS:
        return

    jdict = validator.jdict
    if not jdict:
        return

    predictions_metadata_map = _create_prediction_metadata_map(jdict)
    dataloader = validator.dataloader
    class_label_map = validator.names
    class_map = getattr(validator, "class_map", None)

    batch_logging_interval = _get_eval_batch_logging_interval()
    max_image_predictions = _get_max_image_predictions_to_log()

    for batch_idx, batch in enumerate(dataloader):
        if (batch_idx + 1) % batch_logging_interval != 0:
            continue

        image_paths = batch["im_file"]
        for img_idx, image_path in enumerate(image_paths):
            if _comet_image_prediction_count >= max_image_predictions:
                return

            image_path = Path(image_path)
            annotations = _fetch_annotations(
                img_idx, image_path, batch, predictions_metadata_map, class_label_map, class_map
            )
            _log_images(experiment, [image_path], curr_step, annotations=annotations)
            _comet_image_prediction_count += 1
def _log_plots(experiment, trainer) -> None:
    """
    Log evaluation plots and label plots for the experiment.

    This function logs various evaluation plots and confusion matrices to the experiment tracking system. It handles
    different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots
    for each type.

    Args:
        experiment (comet_ml.Experiment): The Comet ML experiment to log plots to.
        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing validation metrics and save
            directory information.

    Examples:
        >>> from ultralytics.utils.callbacks.comet import _log_plots
        >>> _log_plots(experiment, trainer)
    """
    plot_filenames = None
    if isinstance(trainer.validator.metrics, SegmentMetrics) and trainer.validator.metrics.task == "segment":
        plot_filenames = [
            trainer.save_dir / f"{prefix}{plots}.png"
            for plots in EVALUATION_PLOT_NAMES
            for prefix in SEGMENT_METRICS_PLOT_PREFIX
        ]
    elif isinstance(trainer.validator.metrics, PoseMetrics):
        plot_filenames = [
            trainer.save_dir / f"{prefix}{plots}.png"
            for plots in EVALUATION_PLOT_NAMES
            for prefix in POSE_METRICS_PLOT_PREFIX
        ]
    elif isinstance(trainer.validator.metrics, (DetMetrics, OBBMetrics)):
        plot_filenames = [trainer.save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]

    if plot_filenames is not None:
        _log_images(experiment, plot_filenames, None)

    confusion_matrix_filenames = [trainer.save_dir / f"{plots}.png" for plots in CONFUSION_MATRIX_PLOT_NAMES]
    _log_images(experiment, confusion_matrix_filenames, None)

    if not isinstance(trainer.validator.metrics, ClassifyMetrics):
        label_plot_filenames = [trainer.save_dir / f"{labels}.jpg" for labels in LABEL_PLOT_NAMES]
        _log_images(experiment, label_plot_filenames, None)
def _log_model(experiment, trainer) -> None:
    """Log the best-trained model to Comet.ml."""
    model_name = _get_comet_model_name()
    experiment.log_model(model_name, file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True)


def _log_image_batches(experiment, trainer, curr_step: int) -> None:
    """Log samples of images batches for train, validation, and test."""
    _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step)
    _log_images(experiment, trainer.save_dir.glob("val_batch*.jpg"), curr_step)


def on_pretrain_routine_start(trainer) -> None:
    """Creates or resumes a CometML experiment at the start of a YOLO pre-training routine."""
    _resume_or_create_experiment(trainer.args)


def on_train_epoch_end(trainer) -> None:
    """Log metrics and save batch images at the end of training epochs."""
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]

    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=curr_step, epoch=curr_epoch)
def on_fit_epoch_end(trainer) -> None:
    """
    Log model assets at the end of each epoch during training.

    This function is called at the end of each training epoch to log metrics, learning rates, and model information
    to a Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on
    configuration settings.

    The function retrieves the current Comet ML experiment and logs various training metrics. If it's the first epoch,
    it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled),
    and image predictions (if enabled).

    Args:
        trainer (BaseTrainer): The YOLO trainer object containing training state, metrics, and configuration.

    Examples:
        >>> # Inside a training loop
        >>> on_fit_epoch_end(trainer)  # Log metrics and assets to Comet ML
    """
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]
    save_assets = metadata["save_assets"]

    experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch)
    experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch)
    if curr_epoch == 1:
        from ultralytics.utils.torch_utils import model_info_for_loggers

        experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch)

    if not save_assets:
        return

    _log_model(experiment, trainer)
    if _should_log_confusion_matrix():
        _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
    if _should_log_image_predictions():
        _log_image_predictions(experiment, trainer.validator, curr_step)


def on_train_end(trainer) -> None:
    """Perform operations at the end of training."""
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]
    plots = trainer.args.plots

    _log_model(experiment, trainer)
    if plots:
        _log_plots(experiment, trainer)

    _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
    _log_image_predictions(experiment, trainer.validator, curr_step)
    _log_image_batches(experiment, trainer, curr_step)
    experiment.end()

    global _comet_image_prediction_count
    _comet_image_prediction_count = 0


callbacks = (
    {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_train_epoch_end": on_train_epoch_end,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_train_end": on_train_end,
    }
    if comet_ml
    else {}
)
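# Usage sketch (illustrative, not part of the module): when comet_ml is installed and the Comet
# integration is enabled in SETTINGS, Ultralytics registers the callbacks above automatically, so a
# normal training run is enough to start logging. The model and dataset names below are only examples.
#
#   from ultralytics import YOLO
#
#   model = YOLO("yolo11n.pt")
#   model.train(data="coco8.yaml", epochs=3)   # metrics, plots and the best checkpoint go to Comet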