import os
from pathlib import Path

import numpy as np
import torch

from ultralytics.data import build_dataloader, build_yolo_dataset, converter
from ultralytics.engine.validator import BaseValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
from ultralytics.utils.plotting import output_to_target, plot_images


class DetectionValidator(BaseValidator):
    """
    A class extending the BaseValidator class for validation based on a detection model.

    This class implements validation functionality specific to object detection tasks, including metrics calculation,
    prediction processing, and visualization of results.

    Attributes:
        nt_per_class (np.ndarray): Number of targets per class.
        nt_per_image (np.ndarray): Number of targets per image.
        is_coco (bool): Whether the dataset is COCO.
        is_lvis (bool): Whether the dataset is LVIS.
        class_map (list): Mapping from model class indices to dataset class indices.
        metrics (DetMetrics): Object detection metrics calculator.
        iouv (torch.Tensor): IoU thresholds for mAP calculation.
        niou (int): Number of IoU thresholds.
        lb (list): List for storing ground truth labels for hybrid saving.
        jdict (list): List for storing JSON detection results.
        stats (dict): Dictionary for storing statistics during validation.

    Examples:
        >>> from ultralytics.models.yolo.detect import DetectionValidator
        >>> args = dict(model="yolo11n.pt", data="coco8.yaml")
        >>> validator = DetectionValidator(args=args)
        >>> validator()
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
        """
        Initialize detection validator with necessary variables and settings.

        Args:
            dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
            save_dir (Path, optional): Directory to save results.
            pbar (Any, optional): Progress bar for displaying progress.
            args (dict, optional): Arguments for the validator.
            _callbacks (list, optional): List of callback functions.
        """
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.nt_per_class = None
        self.nt_per_image = None
        self.is_coco = False
        self.is_lvis = False
        self.class_map = None
        self.args.task = "detect"
        self.metrics = DetMetrics(save_dir=self.save_dir)
        self.iouv = torch.linspace(0.5, 0.95, 10)  # IoU thresholds for mAP@0.5:0.95
        self.niou = self.iouv.numel()

    def preprocess(self, batch):
        """
        Preprocess batch of images for YOLO validation.

        Args:
            batch (dict): Batch containing images and annotations.

        Returns:
            (dict): Preprocessed batch.
        """
        batch["img"] = batch["img"].to(self.device, non_blocking=True)
        batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255
        for k in ["batch_idx", "cls", "bboxes"]:
            batch[k] = batch[k].to(self.device)
        return batch

    def init_metrics(self, model):
        """
        Initialize evaluation metrics for YOLO detection validation.

        Args:
            model (torch.nn.Module): Model to validate.
        """
        val = self.data.get(self.args.split, "")  # validation path
        self.is_coco = (
            isinstance(val, str)
            and "coco" in val
            and (val.endswith(f"{os.sep}val2017.txt") or val.endswith(f"{os.sep}test-dev2017.txt"))
        )  # is COCO
        self.is_lvis = isinstance(val, str) and "lvis" in val and not self.is_coco  # is LVIS
        self.class_map = converter.coco80_to_coco91_class() if self.is_coco else list(range(1, len(model.names) + 1))
        self.args.save_json |= self.args.val and (self.is_coco or self.is_lvis) and not self.training  # final val
        self.names = model.names
        self.nc = len(model.names)
        self.end2end = getattr(model, "end2end", False)
        self.metrics.names = self.names
        self.metrics.plot = self.args.plots
        self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf)
        self.seen = 0
        self.jdict = []
        self.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])

    def get_desc(self):
        """Return a formatted string summarizing class metrics of YOLO model."""
        return ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)")

    def postprocess(self, preds):
        """
        Apply Non-maximum suppression to prediction outputs.

        Args:
            preds (torch.Tensor): Raw predictions from the model.

        Returns:
            (List[torch.Tensor]): Processed predictions after NMS.
        """
        return ops.non_max_suppression(
            preds,
            self.args.conf,
            self.args.iou,
            nc=0 if self.args.task == "detect" else self.nc,
            multi_label=True,
            agnostic=self.args.single_cls or self.args.agnostic_nms,
            max_det=self.args.max_det,
            end2end=self.end2end,
            rotated=self.args.task == "obb",
        )

    def _prepare_batch(self, si, batch):
        """
        Prepare a batch of images and annotations for validation.

        Args:
            si (int): Batch index.
            batch (dict): Batch data containing images and annotations.

        Returns:
            (dict): Prepared batch with processed annotations.
        """
        idx = batch["batch_idx"] == si
        cls = batch["cls"][idx].squeeze(-1)
        bbox = batch["bboxes"][idx]
        ori_shape = batch["ori_shape"][si]
        imgsz = batch["img"].shape[2:]
        ratio_pad = batch["ratio_pad"][si]
        if len(cls):
            bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]  # target boxes
            ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad)  # native-space labels
        return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}

    def _prepare_pred(self, pred, pbatch):
        """
        Prepare predictions for evaluation against ground truth.

        Args:
            pred (torch.Tensor): Model predictions.
            pbatch (dict): Prepared batch information.

        Returns:
            (torch.Tensor): Prepared predictions in native space.
        """
        predn = pred.clone()
        ops.scale_boxes(
            pbatch["imgsz"], predn[:, :4], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
        )  # native-space pred
        return predn

    def update_metrics(self, preds, batch):
        """
        Update metrics with new predictions and ground truth.

        Args:
            preds (List[torch.Tensor]): List of predictions from the model.
            batch (dict): Batch data containing ground truth.
        """
        for si, pred in enumerate(preds):
            self.seen += 1
            npr = len(pred)
            stat = dict(
                conf=torch.zeros(0, device=self.device),
                pred_cls=torch.zeros(0, device=self.device),
                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
            )
            pbatch = self._prepare_batch(si, batch)
            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
            nl = len(cls)
            stat["target_cls"] = cls
            stat["target_img"] = cls.unique()
            if npr == 0:
                if nl:
                    for k in self.stats.keys():
                        self.stats[k].append(stat[k])
                    if self.args.plots:
                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                continue

            # Predictions
            if self.args.single_cls:
                pred[:, 5] = 0
            predn = self._prepare_pred(pred, pbatch)
            stat["conf"] = predn[:, 4]
            stat["pred_cls"] = predn[:, 5]

            # Evaluate
            if nl:
                stat["tp"] = self._process_batch(predn, bbox, cls)
            if self.args.plots:
                self.confusion_matrix.process_batch(predn, bbox, cls)
            for k in self.stats.keys():
                self.stats[k].append(stat[k])

            # Save
            if self.args.save_json:
                self.pred_to_json(predn, batch["im_file"][si])
            if self.args.save_txt:
                self.save_one_txt(
                    predn,
                    self.args.save_conf,
                    pbatch["ori_shape"],
                    self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
                )

    def finalize_metrics(self, *args, **kwargs):
        """
        Set final values for metrics speed and confusion matrix.

        Args:
            *args (Any): Variable length argument list.
            **kwargs (Any): Arbitrary keyword arguments.
        """
        self.metrics.speed = self.speed
        self.metrics.confusion_matrix = self.confusion_matrix

    def get_stats(self):
        """
        Calculate and return metrics statistics.

        Returns:
            (dict): Dictionary containing metrics results.
        """
        stats = {k: torch.cat(v, 0).cpu().numpy() for k, v in self.stats.items()}  # to numpy
        self.nt_per_class = np.bincount(stats["target_cls"].astype(int), minlength=self.nc)
        self.nt_per_image = np.bincount(stats["target_img"].astype(int), minlength=self.nc)
        stats.pop("target_img", None)
        if len(stats):
            self.metrics.process(**stats, on_plot=self.on_plot)
        return self.metrics.results_dict

    def print_results(self):
        """Print training/validation set metrics per class."""
        pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys)  # print format
        LOGGER.info(pf % ("all", self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
        if self.nt_per_class.sum() == 0:
            LOGGER.warning(f"no labels found in {self.args.task} set, can not compute metrics without labels")

        # Print results per class
        if self.args.verbose and not self.training and self.nc > 1 and len(self.stats):
            for i, c in enumerate(self.metrics.ap_class_index):
                LOGGER.info(
                    pf % (self.names[c], self.nt_per_image[c], self.nt_per_class[c], *self.metrics.class_result(i))
                )

        if self.args.plots:
            for normalize in True, False:
                self.confusion_matrix.plot(
                    save_dir=self.save_dir, names=self.names.values(), normalize=normalize, on_plot=self.on_plot
                )

    def _process_batch(self, detections, gt_bboxes, gt_cls):
        """
        Return correct prediction matrix.

        Args:
            detections (torch.Tensor): Tensor of shape (N, 6) representing detections where each detection is
                (x1, y1, x2, y2, conf, class).
            gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground-truth bounding box coordinates. Each
                bounding box is of the format: (x1, y1, x2, y2).
            gt_cls (torch.Tensor): Tensor of shape (M,) representing target class indices.

        Returns:
            (torch.Tensor): Correct prediction matrix of shape (N, 10) for 10 IoU levels.
        """
        iou = box_iou(gt_bboxes, detections[:, :4])
        return self.match_predictions(detections[:, 5], gt_cls, iou)

    def build_dataset(self, img_path, mode="val", batch=None):
        """
        Build YOLO Dataset.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`.

        Returns:
            (Dataset): YOLO dataset.
        """
        return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=self.stride)

    def get_dataloader(self, dataset_path, batch_size):
        """
        Construct and return dataloader.

        Args:
            dataset_path (str): Path to the dataset.
            batch_size (int): Size of each batch.

        Returns:
            (torch.utils.data.DataLoader): Dataloader for validation.
        """
        dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
        return build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1)

    def plot_val_samples(self, batch, ni):
        """
        Plot validation image samples.

        Args:
            batch (dict): Batch containing images and annotations.
            ni (int): Batch index.
        """
        plot_images(
            batch["img"],
            batch["batch_idx"],
            batch["cls"].squeeze(-1),
            batch["bboxes"],
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )

    def plot_predictions(self, batch, preds, ni):
        """
        Plot predicted bounding boxes on input images and save the result.

        Args:
            batch (dict): Batch containing images and annotations.
            preds (List[torch.Tensor]): List of predictions from the model.
            ni (int): Batch index.
        """
        plot_images(
            batch["img"],
            *output_to_target(preds, max_det=self.args.max_det),
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )

    def save_one_txt(self, predn, save_conf, shape, file):
        """
        Save YOLO detections to a txt file in normalized coordinates in a specific format.

        Args:
            predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
            save_conf (bool): Whether to save confidence scores.
            shape (tuple): Shape of the original image.
            file (Path): File path to save the detections.
        """
        from ultralytics.engine.results import Results

        Results(
            np.zeros((shape[0], shape[1]), dtype=np.uint8),
            path=None,
            names=self.names,
            boxes=predn[:, :6],
        ).save_txt(file, save_conf=save_conf)

    def pred_to_json(self, predn, filename):
        """
        Serialize YOLO predictions to COCO json format.

        Args:
            predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
            filename (str): Image filename.
        """
        stem = Path(filename).stem
        image_id = int(stem) if stem.isnumeric() else stem
        box = ops.xyxy2xywh(predn[:, :4])  # xywh
        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
        for p, b in zip(predn.tolist(), box.tolist()):
            self.jdict.append(
                {
                    "image_id": image_id,
                    "category_id": self.class_map[int(p[5])],
                    "bbox": [round(x, 3) for x in b],
                    "score": round(p[4], 5),
                }
            )

    def eval_json(self, stats):
        """
        Evaluate YOLO output in JSON format and return performance statistics.

        Args:
            stats (dict): Current statistics dictionary.

        Returns:
            (dict): Updated statistics dictionary with COCO/LVIS evaluation results.
        """
        if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
            pred_json = self.save_dir / "predictions.json"  # predictions
            anno_json = (
                self.data["path"]
                / "annotations"
                / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
            )  # annotations
            pkg = "pycocotools" if self.is_coco else "lvis"
            LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
            try:
                for x in pred_json, anno_json:
                    assert x.is_file(), f"{x} file not found"
                check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
                if self.is_coco:
                    from pycocotools.coco import COCO  # noqa
                    from pycocotools.cocoeval import COCOeval  # noqa

                    anno = COCO(str(anno_json))  # init annotations api
                    pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                    val = COCOeval(anno, pred, "bbox")
                else:
                    from lvis import LVIS, LVISEval

                    anno = LVIS(str(anno_json))  # init annotations api
                    pred = anno._load_json(str(pred_json))  # init predictions api (must pass string, not Path)
                    val = LVISEval(anno, pred, "bbox")
                val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
                val.evaluate()
                val.accumulate()
                val.summarize()
                if self.is_lvis:
                    val.print_results()  # explicitly call print_results for LVIS
                # Update mAP50-95 and mAP50
                stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = (
                    val.stats[:2] if self.is_coco else [val.results["AP"], val.results["AP50"]]
                )
                if self.is_lvis:
                    stats["metrics/APr(B)"] = val.results["APr"]
                    stats["metrics/APc(B)"] = val.results["APc"]
                    stats["metrics/APf(B)"] = val.results["APf"]
                    stats["fitness"] = val.results["AP"]
            except Exception as e:
                LOGGER.warning(f"{pkg} unable to run: {e}")
        return stats
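

# Minimal usage sketch (illustrative only; it mirrors the Examples block in the class
# docstring above). It assumes the "yolo11n.pt" weights and the "coco8.yaml" dataset
# definition bundled with the ultralytics package can be resolved locally.
if __name__ == "__main__":
    validator = DetectionValidator(args=dict(model="yolo11n.pt", data="coco8.yaml"))
    validator()  # run validation and print per-class detection metrics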