"""Model head modules."""

import copy
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_

from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
from ultralytics.utils.torch_utils import fuse_conv_and_bn, smart_inference_mode

from .block import DFL, SAVPE, BNContrastiveHead, ContrastiveHead, Proto, Residual, SwiGLUFFN
from .conv import Conv, DWConv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init

__all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder", "v10Detect", "YOLOEDetect", "YOLOESegment"


class Detect(nn.Module):
    """YOLO Detect head for detection models."""

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    format = None  # export format
    end2end = False  # end2end
    max_det = 300  # max detections per image
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init
    legacy = False  # backward compatibility for v3/v5/v8/v9 models
    xyxy = False  # xyxy or xywh output

    def __init__(self, nc=80, ch=()):
        """Initialize the YOLO detection layer with specified number of classes and channels."""
        super().__init__()
        self.nc = nc  # number of classes
        self.nl = len(ch)  # number of detection layers
        self.reg_max = 16  # DFL channels
        self.no = nc + self.reg_max * 4  # number of outputs per anchor
        self.stride = torch.zeros(self.nl)  # strides computed during build
        c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100))  # channels
        self.cv2 = nn.ModuleList(
            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
        )
        self.cv3 = (
            nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
            if self.legacy
            else nn.ModuleList(
                nn.Sequential(
                    nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)),
                    nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)),
                    nn.Conv2d(c3, self.nc, 1),
                )
                for x in ch
            )
        )
        self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

        if self.end2end:
            self.one2one_cv2 = copy.deepcopy(self.cv2)
            self.one2one_cv3 = copy.deepcopy(self.cv3)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        if self.end2end:
            return self.forward_end2end(x)
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:  # Training path
            return x
        y = self._inference(x)
        return y if self.export else (y, x)

    def forward_end2end(self, x):
        """
        Performs forward pass of the v10Detect module.

        Args:
            x (List[torch.Tensor]): Input feature maps from different levels.

        Returns:
            (dict | tuple):

                - If in training mode, returns a dictionary containing outputs of both one2many and one2one detections.
                - If not in training mode, returns processed detections or a tuple with processed detections and raw outputs.
        """
        x_detach = [xi.detach() for xi in x]
        one2one = [
            torch.cat((self.one2one_cv2[i](x_detach[i]), self.one2one_cv3[i](x_detach[i])), 1) for i in range(self.nl)
        ]
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:  # Training path
            return {"one2many": x, "one2one": one2one}

        y = self._inference(one2one)
        y = self.postprocess(y.permute(0, 2, 1), self.max_det, self.nc)
        return y if self.export else (y, {"one2many": x, "one2one": one2one})

    def _inference(self, x):
        """
        Decode predicted bounding boxes and class probabilities based on multiple-level feature maps.

        Args:
            x (List[torch.Tensor]): List of feature maps from different detection layers.

        Returns:
            (torch.Tensor): Concatenated tensor of decoded bounding boxes and class probabilities.
        """
        shape = x[0].shape  # BCHW
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
        if self.format != "imx" and (self.dynamic or self.shape != shape):
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

        if self.export and self.format in {"saved_model", "pb", "tflite", "edgetpu", "tfjs"}:  # avoid TF FlexSplitV ops
            box = x_cat[:, : self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4 :]
        else:
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)

        if self.export and self.format in {"tflite", "edgetpu"}:
            # Precompute normalization factor to increase numerical stability
            grid_h = shape[2]
            grid_w = shape[3]
            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
            norm = self.strides / (self.stride[0] * grid_size)
            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
        elif self.export and self.format == "imx":
            dbox = self.decode_bboxes(
                self.dfl(box) * self.strides, self.anchors.unsqueeze(0) * self.strides, xywh=False
            )
            return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
        else:
            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

        return torch.cat((dbox, cls.sigmoid()), 1)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        for a, b, s in zip(m.cv2, m.cv3, m.stride):
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls
        if self.end2end:
            for a, b, s in zip(m.one2one_cv2, m.one2one_cv3, m.stride):
                a[-1].bias.data[:] = 1.0  # box
                b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls

    def decode_bboxes(self, bboxes, anchors, xywh=True):
        """Decode bounding boxes."""
        return dist2bbox(bboxes, anchors, xywh=xywh and not (self.end2end or self.xyxy), dim=1)

    @staticmethod
    def postprocess(preds: torch.Tensor, max_det: int, nc: int = 80):
        """
        Post-processes YOLO model predictions.

        Args:
            preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc) with last dimension
                format [x, y, w, h, class_probs].
            max_det (int): Maximum detections per image.
            nc (int, optional): Number of classes. Default: 80.

        Returns:
            (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6) and last
                dimension format [x, y, w, h, max_class_prob, class_index].
        """
        batch_size, anchors, _ = preds.shape  # e.g. shape(16, 8400, 84)
        boxes, scores = preds.split([4, nc], dim=-1)
        index = scores.amax(dim=-1).topk(min(max_det, anchors))[1].unsqueeze(-1)
        boxes = boxes.gather(dim=1, index=index.repeat(1, 1, 4))
        scores = scores.gather(dim=1, index=index.repeat(1, 1, nc))
        scores, index = scores.flatten(1).topk(min(max_det, anchors))
        i = torch.arange(batch_size)[..., None]  # batch indices
        return torch.cat([boxes[i, index // nc], scores[..., None], (index % nc)[..., None].float()], dim=-1)
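
# Usage sketch (illustrative, not part of the original module): a minimal smoke test for the Detect
# head, assuming three FPN levels with YOLOv8n-like channel widths and strides. The feature-map sizes
# (80/40/20 for a 640x640 input) are assumptions for the example only. Note that forward() mutates
# its input list in place, hence the clone() calls.
#
#   head = Detect(nc=80, ch=(64, 128, 256))
#   head.stride = torch.tensor([8.0, 16.0, 32.0])  # normally set during model build
#   feats = [torch.randn(1, c, s, s) for c, s in zip((64, 128, 256), (80, 40, 20))]
#   head.train()
#   raw = head([f.clone() for f in feats])  # list of (1, 4 * reg_max + nc, h, w) per-level maps
#   head.eval()
#   y, _ = head([f.clone() for f in feats])  # y: (1, 4 + nc, 8400) decoded boxes + class scores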


class Segment(Detect):
    """YOLO Segment head for segmentation models."""

    def __init__(self, nc=80, nm=32, npr=256, ch=()):
        """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers."""
        super().__init__(nc, ch)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos

        c4 = max(ch[0] // 4, self.nm)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)

    def forward(self, x):
        """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
        p = self.proto(x[0])  # mask protos
        bs = p.shape[0]  # batch size

        mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
        x = Detect.forward(self, x)
        if self.training:
            return x, mc, p
        return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))


class OBB(Detect):
    """YOLO OBB detection head for detection with rotation models."""

    def __init__(self, nc=80, ne=1, ch=()):
        """Initialize OBB with number of classes `nc` and layer channels `ch`."""
        super().__init__(nc, ch)
        self.ne = ne  # number of extra parameters

        c4 = max(ch[0] // 4, self.ne)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        bs = x[0].shape[0]  # batch size
        angle = torch.cat([self.cv4[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2)  # OBB theta logits
        angle = (angle.sigmoid() - 0.25) * math.pi  # [-pi/4, 3pi/4]
        if not self.training:
            self.angle = angle  # decode_bboxes reads the angle during inference
        x = Detect.forward(self, x)
        if self.training:
            return x, angle
        return torch.cat([x, angle], 1) if self.export else (torch.cat([x[0], angle], 1), (x[1], angle))

    def decode_bboxes(self, bboxes, anchors):
        """Decode rotated bounding boxes."""
        return dist2rbox(bboxes, self.angle, anchors, dim=1)


class Pose(Detect):
    """YOLO Pose head for keypoints models."""

    def __init__(self, nc=80, kpt_shape=(17, 3), ch=()):
        """Initialize YOLO network with default parameters and Convolutional Layers."""
        super().__init__(nc, ch)
        self.kpt_shape = kpt_shape  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
        self.nk = kpt_shape[0] * kpt_shape[1]  # total number of keypoint values

        c4 = max(ch[0] // 4, self.nk)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)

    def forward(self, x):
        """Perform forward pass through YOLO model and return predictions."""
        bs = x[0].shape[0]  # batch size
        kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
        x = Detect.forward(self, x)
        if self.training:
            return x, kpt
        pred_kpt = self.kpts_decode(bs, kpt)
        return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

    def kpts_decode(self, bs, kpts):
        """Decodes keypoints."""
        ndim = self.kpt_shape[1]
        if self.export:
            if self.format in {"tflite", "edgetpu"}:
                # Precompute normalization factor to increase numerical stability
                y = kpts.view(bs, *self.kpt_shape, -1)
                grid_h, grid_w = self.shape[2], self.shape[3]
                grid_size = torch.tensor([grid_w, grid_h], device=y.device).reshape(1, 2, 1)
                norm = self.strides / (self.stride[0] * grid_size)
                a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * norm
            else:
                y = kpts.view(bs, *self.kpt_shape, -1)
                a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
            if ndim == 3:
                a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
            return a.view(bs, self.nk, -1)
        else:
            y = kpts.clone()
            if ndim == 3:
                y[:, 2::ndim] = y[:, 2::ndim].sigmoid()  # sigmoid (WARNING: inplace .sigmoid_() Apple MPS bug)
            y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
            y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
            return y


class Classify(nn.Module):
    """YOLO classification head, i.e. x(b,c1,20,20) to x(b,c2)."""

    export = False  # export mode

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        """Initializes YOLO classification head to transform input tensor from (b,c1,20,20) to (b,c2) shape."""
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, c_, k, s, p, g)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
        self.drop = nn.Dropout(p=0.0, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b,c2)

    def forward(self, x):
        """Performs a forward pass of the YOLO model on input image data."""
        if isinstance(x, list):
            x = torch.cat(x, 1)
        x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
        if self.training:
            return x
        y = x.softmax(1)  # get final output
        return y if self.export else (y, x)


class WorldDetect(Detect):
    """Head for integrating YOLO detection models with semantic understanding from text embeddings."""

    def __init__(self, nc=80, embed=512, with_bn=False, ch=()):
        """Initialize YOLO detection layer with nc classes and layer channels ch."""
        super().__init__(nc, ch)
        c3 = max(ch[0], min(self.nc, 100))
        self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
        self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)

    def forward(self, x, text):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv4[i](self.cv3[i](x[i]), text)), 1)
        if self.training:
            return x
        self.no = self.nc + self.reg_max * 4  # self.nc could change when inferring with different texts
        y = self._inference(x)
        return y if self.export else (y, x)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        for a, b, s in zip(m.cv2, m.cv3, m.stride):
            a[-1].bias.data[:] = 1.0  # box


class LRPCHead(nn.Module):
    """Lightweight Region Proposal and Classification Head for efficient object detection."""

    def __init__(self, vocab, pf, loc, enabled=True):
        """Initialize LRPCHead with vocabulary, proposal filter, and localization components."""
        super().__init__()
        self.vocab = self.conv2linear(vocab) if enabled else vocab
        self.pf = pf  # proposal filter
        self.loc = loc  # localization head
        self.enabled = enabled

    def conv2linear(self, conv):
        """Convert a 1x1 convolutional layer to a linear layer."""
        assert isinstance(conv, nn.Conv2d) and conv.kernel_size == (1, 1)
        linear = nn.Linear(conv.in_channels, conv.out_channels)
        linear.weight.data = conv.weight.view(conv.out_channels, -1).data
        linear.bias.data = conv.bias.data
        return linear

    def forward(self, cls_feat, loc_feat, conf):
        """Process classification and localization features to generate detection proposals."""
        if self.enabled:
            pf_score = self.pf(cls_feat)[0, 0].flatten(0)
            mask = pf_score.sigmoid() > conf
            cls_feat = cls_feat.flatten(2).transpose(-1, -2)
            cls_feat = self.vocab(cls_feat[:, mask] if conf else cls_feat * mask.unsqueeze(-1).int())
            return (self.loc(loc_feat), cls_feat.transpose(-1, -2)), mask
        else:
            cls_feat = self.vocab(cls_feat)
            loc_feat = self.loc(loc_feat)
            return (loc_feat, cls_feat.flatten(2)), torch.ones(
                cls_feat.shape[2] * cls_feat.shape[3], device=cls_feat.device, dtype=torch.bool
            )


class YOLOEDetect(Detect):
    """Head for integrating YOLO detection models with semantic understanding from text embeddings."""

    is_fused = False

    def __init__(self, nc=80, embed=512, with_bn=False, ch=()):
        """Initialize YOLO detection layer with nc classes and layer channels ch."""
        super().__init__(nc, ch)
        c3 = max(ch[0], min(self.nc, 100))
        assert c3 <= embed
        assert with_bn is True
        self.cv3 = (
            nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
            if self.legacy
            else nn.ModuleList(
                nn.Sequential(
                    nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)),
                    nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)),
                    nn.Conv2d(c3, embed, 1),
                )
                for x in ch
            )
        )

        self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)

        self.reprta = Residual(SwiGLUFFN(embed, embed))
        self.savpe = SAVPE(ch, c3, embed)
        self.embed = embed

    @smart_inference_mode()
    def fuse(self, txt_feats):
        """Fuse text features with model weights for efficient inference."""
        if self.is_fused:
            return

        assert not self.training
        txt_feats = txt_feats.to(torch.float32).squeeze(0)
        for cls_head, bn_head in zip(self.cv3, self.cv4):
            assert isinstance(cls_head, nn.Sequential)
            assert isinstance(bn_head, BNContrastiveHead)
            conv = cls_head[-1]
            assert isinstance(conv, nn.Conv2d)
            logit_scale = bn_head.logit_scale
            bias = bn_head.bias
            norm = bn_head.norm

            t = txt_feats * logit_scale.exp()
            conv = fuse_conv_and_bn(conv, norm)

            w = conv.weight.data.squeeze(-1).squeeze(-1)
            b = conv.bias.data

            w = t @ w
            b1 = (t @ b.reshape(-1).unsqueeze(-1)).squeeze(-1)
            b2 = torch.ones_like(b1) * bias

            fused = nn.Conv2d(conv.in_channels, w.shape[0], kernel_size=1).requires_grad_(False).to(conv.weight.device)
            fused.weight.data.copy_(w.unsqueeze(-1).unsqueeze(-1))
            fused.bias.data.copy_(b1 + b2)
            cls_head[-1] = fused

            bn_head.fuse()

        del self.reprta
        self.reprta = nn.Identity()
        self.is_fused = True

    def get_tpe(self, tpe):
        """Get text prompt embeddings with normalization."""
        return None if tpe is None else F.normalize(self.reprta(tpe), dim=-1, p=2)

    def get_vpe(self, x, vpe):
        """Get visual prompt embeddings with spatial awareness."""
        if vpe.shape[1] == 0:  # no visual prompt embeddings
            return torch.zeros(x[0].shape[0], 0, self.embed, device=x[0].device)
        if vpe.ndim == 4:  # (B, N, H, W) visual prompt masks
            vpe = self.savpe(x, vpe)
        assert vpe.ndim == 3  # (B, N, D)
        return vpe

    def forward_lrpc(self, x, return_mask=False):
        """Process features with fused text embeddings to generate detections for prompt-free model."""
        masks = []
        assert self.is_fused, "Prompt-free inference requires model to be fused!"
        for i in range(self.nl):
            cls_feat = self.cv3[i](x[i])
            loc_feat = self.cv2[i](x[i])
            assert isinstance(self.lrpc[i], LRPCHead)
            x[i], mask = self.lrpc[i](
                cls_feat, loc_feat, 0 if self.export and not self.dynamic else getattr(self, "conf", 0.001)
            )
            masks.append(mask)
        shape = x[0][0].shape
        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (
                x.transpose(0, 1) for x in make_anchors([b[0] for b in x], self.stride, 0.5)
            )
            self.shape = shape
        box = torch.cat([xi[0].view(shape[0], self.reg_max * 4, -1) for xi in x], 2)
        cls = torch.cat([xi[1] for xi in x], 2)

        if self.export and self.format in {"tflite", "edgetpu"}:
            # Precompute normalization factor to increase numerical stability
            grid_h = shape[2]
            grid_w = shape[3]
            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
            norm = self.strides / (self.stride[0] * grid_size)
            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
        else:
            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

        mask = torch.cat(masks)
        y = torch.cat((dbox if self.export and not self.dynamic else dbox[..., mask], cls.sigmoid()), 1)

        if return_mask:
            return (y, mask) if self.export else ((y, x), mask)
        else:
            return y if self.export else (y, x)

    def forward(self, x, cls_pe, return_mask=False):
        """Process features with class prompt embeddings to generate detections."""
        if hasattr(self, "lrpc"):  # prompt-free inference
            return self.forward_lrpc(x, return_mask)
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv4[i](self.cv3[i](x[i]), cls_pe)), 1)
        if self.training:
            return x
        self.no = self.nc + self.reg_max * 4  # self.nc could change when inferring with different texts
        y = self._inference(x)
        return y if self.export else (y, x)

    def bias_init(self):
        """Initialize biases for detection heads."""
        m = self  # self.model[-1]  # Detect() module
        for a, b, c, s in zip(m.cv2, m.cv3, m.cv4, m.stride):
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[:] = 0.0
            c.bias.data[:] = math.log(5 / m.nc / (640 / s) ** 2)


class YOLOESegment(YOLOEDetect):
    """YOLO segmentation head with text embedding capabilities."""

    def __init__(self, nc=80, nm=32, npr=256, embed=512, with_bn=False, ch=()):
        """Initialize YOLOESegment with class count, mask parameters, and embedding dimensions."""
        super().__init__(nc, embed, with_bn, ch)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos

        c5 = max(ch[0] // 4, self.nm)
        self.cv5 = nn.ModuleList(nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nm, 1)) for x in ch)

    def forward(self, x, text):
        """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
        p = self.proto(x[0])  # mask protos
        bs = p.shape[0]  # batch size

        mc = torch.cat([self.cv5[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
        has_lrpc = hasattr(self, "lrpc")

        if not has_lrpc:
            x = YOLOEDetect.forward(self, x, text)
        else:
            x, mask = YOLOEDetect.forward(self, x, text, return_mask=True)

        if self.training:
            return x, mc, p

        if has_lrpc:
            mc = (mc * mask.int()) if self.export and not self.dynamic else mc[..., mask]

        return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))
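
# Usage sketch (illustrative, not part of the original module): YOLOEDetect.fuse folds a fixed
# prompt vocabulary straight into the final 1x1 classification conv, so the contrastive text-image
# similarity becomes an ordinary convolution and prompt-free inference pays no per-image embedding
# cost. `get_text_pe` below is a hypothetical helper standing in for however the wrapping model
# produces text embeddings; shapes are assumptions.
#
#   txt_feats = model.get_text_pe(["person", "car"])  # (1, 2, embed), hypothetical helper
#   head.fuse(txt_feats)  # cv3[i][-1] now emits one channel per prompt; head.is_fused -> True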


class RTDETRDecoder(nn.Module):
    """
    Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.

    This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes
    and class labels for objects in an image. It integrates features from multiple layers and runs through a series of
    Transformer decoder layers to output the final predictions.
    """

    export = False  # export mode

    def __init__(
        self,
        nc=80,
        ch=(512, 1024, 2048),
        hd=256,  # hidden dim
        nq=300,  # num queries
        ndp=4,  # num decoder points
        nh=8,  # num heads
        ndl=6,  # num decoder layers
        d_ffn=1024,  # feed-forward dim
        dropout=0.0,
        act=nn.ReLU(),
        eval_idx=-1,
        # Training args
        nd=100,  # num denoising
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learnt_init_query=False,
    ):
        """
        Initializes the RTDETRDecoder module with the given parameters.

        Args:
            nc (int): Number of classes. Default is 80.
            ch (tuple): Channels in the backbone feature maps. Default is (512, 1024, 2048).
            hd (int): Dimension of hidden layers. Default is 256.
            nq (int): Number of query points. Default is 300.
            ndp (int): Number of decoder points. Default is 4.
            nh (int): Number of heads in multi-head attention. Default is 8.
            ndl (int): Number of decoder layers. Default is 6.
            d_ffn (int): Dimension of the feed-forward networks. Default is 1024.
            dropout (float): Dropout rate. Default is 0.0.
            act (nn.Module): Activation function. Default is nn.ReLU.
            eval_idx (int): Evaluation index. Default is -1.
            nd (int): Number of denoising. Default is 100.
            label_noise_ratio (float): Label noise ratio. Default is 0.5.
            box_noise_scale (float): Box noise scale. Default is 1.0.
            learnt_init_query (bool): Whether to learn initial query embeddings. Default is False.
        """
        super().__init__()
        self.hidden_dim = hd
        self.nhead = nh
        self.nl = len(ch)  # num level
        self.nc = nc
        self.num_queries = nq
        self.num_decoder_layers = ndl

        # Backbone feature projection
        self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)

        # Transformer module
        decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
        self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)

        # Denoising part
        self.denoising_class_embed = nn.Embedding(nc, hd)
        self.num_denoising = nd
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale

        # Decoder embedding
        self.learnt_init_query = learnt_init_query
        if learnt_init_query:
            self.tgt_embed = nn.Embedding(nq, hd)
        self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)

        # Encoder head
        self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
        self.enc_score_head = nn.Linear(hd, nc)
        self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)

        # Decoder head
        self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
        self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])

        self._reset_parameters()

    def forward(self, x, batch=None):
        """
        Runs the forward pass of the module, returning bounding box and classification scores for the input.

        Args:
            x (List[torch.Tensor]): List of feature maps from the backbone.
            batch (dict, optional): Batch information for training.

        Returns:
            (tuple | torch.Tensor): During training, returns a tuple of bounding boxes, scores, and other metadata.
                During inference, returns a tensor of shape (bs, 300, 4+nc) containing bounding boxes and class scores.
        """
        from ultralytics.models.utils.ops import get_cdn_group

        # Input projection and embedding
        feats, shapes = self._get_encoder_input(x)

        # Prepare denoising training
        dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
            batch,
            self.nc,
            self.num_queries,
            self.denoising_class_embed.weight,
            self.num_denoising,
            self.label_noise_ratio,
            self.box_noise_scale,
            self.training,
        )

        embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)

        # Decoder
        dec_bboxes, dec_scores = self.decoder(
            embed,
            refer_bbox,
            feats,
            shapes,
            self.dec_bbox_head,
            self.dec_score_head,
            self.query_pos_head,
            attn_mask=attn_mask,
        )
        y = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
        if self.training:
            return y
        # (bs, 300, 4 + nc)
        y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
        return y if self.export else (y, x)

    def _generate_anchors(self, shapes, grid_size=0.05, dtype=torch.float32, device="cpu", eps=1e-2):
        """
        Generates anchor bounding boxes for given shapes with specific grid size and validates them.

        Args:
            shapes (list): List of feature map shapes.
            grid_size (float, optional): Base size of grid cells. Default is 0.05.
            dtype (torch.dtype, optional): Data type for tensors. Default is torch.float32.
            device (str, optional): Device to create tensors on. Default is "cpu".
            eps (float, optional): Small value for numerical stability. Default is 1e-2.

        Returns:
            (tuple): Tuple containing anchors and valid mask tensors.
        """
        anchors = []
        for i, (h, w) in enumerate(shapes):
            sy = torch.arange(end=h, dtype=dtype, device=device)
            sx = torch.arange(end=w, dtype=dtype, device=device)
            grid_y, grid_x = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
            grid_xy = torch.stack([grid_x, grid_y], -1)  # (h, w, 2)

            valid_WH = torch.tensor([w, h], dtype=dtype, device=device)
            grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH  # (1, h, w, 2)
            wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**i)
            anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4))  # (1, h*w, 4)

        anchors = torch.cat(anchors, 1)  # (1, h*w*nl, 4)
        valid_mask = ((anchors > eps) & (anchors < 1 - eps)).all(-1, keepdim=True)  # (1, h*w*nl, 1)
        anchors = torch.log(anchors / (1 - anchors))
        anchors = anchors.masked_fill(~valid_mask, float("inf"))
        return anchors, valid_mask

    def _get_encoder_input(self, x):
        """
        Processes and returns encoder inputs by getting projection features from input and concatenating them.

        Args:
            x (List[torch.Tensor]): List of feature maps from the backbone.

        Returns:
            (tuple): Tuple containing processed features and their shapes.
        """
        # Get projection features
        x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
        # Get encoder inputs
        feats = []
        shapes = []
        for feat in x:
            h, w = feat.shape[2:]
            feats.append(feat.flatten(2).permute(0, 2, 1))  # (b, c, h, w) -> (b, h*w, c)
            shapes.append([h, w])

        feats = torch.cat(feats, 1)  # (b, h*w, c)
        return feats, shapes

    def _get_decoder_input(self, feats, shapes, dn_embed=None, dn_bbox=None):
        """
        Generates and prepares the input required for the decoder from the provided features and shapes.

        Args:
            feats (torch.Tensor): Processed features from encoder.
            shapes (list): List of feature map shapes.
            dn_embed (torch.Tensor, optional): Denoising embeddings. Default is None.
            dn_bbox (torch.Tensor, optional): Denoising bounding boxes. Default is None.

        Returns:
            (tuple): Tuple containing embeddings, reference bounding boxes, encoded bounding boxes, and scores.
        """
        bs = feats.shape[0]
        # Prepare input for decoder
        anchors, valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
        features = self.enc_output(valid_mask * feats)  # (bs, h*w, 256)

        enc_outputs_scores = self.enc_score_head(features)  # (bs, h*w, nc)

        # Query selection: (bs, num_queries) top-scoring anchors
        topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1)
        batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1)

        top_k_features = features[batch_ind, topk_ind].view(bs, self.num_queries, -1)
        top_k_anchors = anchors[:, topk_ind].view(bs, self.num_queries, -1)

        # Dynamic anchors + static content
        refer_bbox = self.enc_bbox_head(top_k_features) + top_k_anchors

        enc_bboxes = refer_bbox.sigmoid()
        if dn_bbox is not None:
            refer_bbox = torch.cat([dn_bbox, refer_bbox], 1)
        enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1)

        embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) if self.learnt_init_query else top_k_features
        if self.training:
            refer_bbox = refer_bbox.detach()
            if not self.learnt_init_query:
                embeddings = embeddings.detach()
        if dn_embed is not None:
            embeddings = torch.cat([dn_embed, embeddings], 1)

        return embeddings, refer_bbox, enc_bboxes, enc_scores

    def _reset_parameters(self):
        """Initializes or resets the parameters of the model's various components with predefined weights and biases."""
        bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
        linear_init(self.enc_score_head)
        constant_(self.enc_score_head.bias, bias_cls)
        constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
        constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
        for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
            linear_init(cls_)
            constant_(cls_.bias, bias_cls)
            constant_(reg_.layers[-1].weight, 0.0)
            constant_(reg_.layers[-1].bias, 0.0)

        linear_init(self.enc_output[0])
        xavier_uniform_(self.enc_output[0].weight)
        if self.learnt_init_query:
            xavier_uniform_(self.tgt_embed.weight)
        xavier_uniform_(self.query_pos_head.layers[0].weight)
        xavier_uniform_(self.query_pos_head.layers[1].weight)
        for layer in self.input_proj:
            xavier_uniform_(layer[0].weight)
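
# Usage sketch (illustrative, not part of the original module): the decoder consumes backbone
# feature maps and, at inference, emits (bs, nq, 4 + nc) predictions -- sigmoid-space cxcywh boxes
# plus class scores, with no NMS stage. Channel widths and map sizes are assumptions.
#
#   dec = RTDETRDecoder(nc=80, ch=(256, 256, 256), hd=256, nq=300)
#   dec.eval()
#   y, _ = dec([torch.randn(1, 256, s, s) for s in (80, 40, 20)])  # y: (1, 300, 84)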


class v10Detect(Detect):
    """
    v10 Detection head from https://arxiv.org/pdf/2405.14458.

    Args:
        nc (int): Number of classes.
        ch (tuple): Tuple of channel sizes.

    Attributes:
        max_det (int): Maximum number of detections.

    Methods:
        __init__(self, nc=80, ch=()): Initializes the v10Detect object.
        forward(self, x): Performs forward pass of the v10Detect module.
        bias_init(self): Initializes biases of the Detect module.
    """

    end2end = True

    def __init__(self, nc=80, ch=()):
        """Initializes the v10Detect object with the specified number of classes and input channels."""
        super().__init__(nc, ch)
        c3 = max(ch[0], min(self.nc, 100))  # channels
        # Lightweight classification head
        self.cv3 = nn.ModuleList(
            nn.Sequential(
                nn.Sequential(Conv(x, x, 3, g=x), Conv(x, c3, 1)),
                nn.Sequential(Conv(c3, c3, 3, g=c3), Conv(c3, c3, 1)),
                nn.Conv2d(c3, self.nc, 1),
            )
            for x in ch
        )
        self.one2one_cv3 = copy.deepcopy(self.cv3)

    def fuse(self):
        """Removes the one2many head."""
        self.cv2 = self.cv3 = nn.ModuleList([nn.Identity()] * self.nl)
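
# Usage sketch (illustrative, not part of the original module): v10Detect sets end2end=True, so
# eval-mode forward already applies Detect.postprocess -- a top-k selection over class scores that
# replaces NMS -- and fuse() drops the one2many branch entirely. Channel widths are assumptions.
#
#   det = v10Detect(nc=80, ch=(64, 128, 256))
#   det.stride = torch.tensor([8.0, 16.0, 32.0])
#   det.eval()
#   y, _ = det([torch.randn(1, c, s, s) for c, s in zip((64, 128, 256), (80, 40, 20))])
#   # y: (1, 300, 6) rows of [x, y, w, h, score, class_index]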