from __future__ import annotations

import collections.abc
import numbers
from contextlib import suppress
from typing import Any, Callable, Dict, List, Literal, Sequence, Tuple, Type, Union

import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision._utils import sequence_to_str
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size  # noqa: F401
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor
from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT


def _setup_number_or_seq(arg: Union[int, float, Sequence[Union[int, float]]], name: str) -> Sequence[float]:
    if not isinstance(arg, (int, float, Sequence)):
        raise TypeError(f"{name} should be a number or a sequence of numbers. Got {type(arg)}")
    if isinstance(arg, Sequence) and len(arg) not in (1, 2):
        raise ValueError(f"If {name} is a sequence its length should be 1 or 2. Got {len(arg)}")
    if isinstance(arg, Sequence):
        for element in arg:
            if not isinstance(element, (int, float)):
                raise ValueError(f"{name} should be a sequence of numbers. Got {type(element)}")

    # Normalize to a two-element list of floats.
    if isinstance(arg, (int, float)):
        arg = [float(arg), float(arg)]
    elif isinstance(arg, Sequence):
        if len(arg) == 1:
            arg = [float(arg[0]), float(arg[0])]
        else:
            arg = [float(arg[0]), float(arg[1])]
    return arg
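# Example: _setup_number_or_seq normalizes a scalar or a 1-/2-element sequence into a pair of floats,
# e.g. _setup_number_or_seq(0.5, "sigma") -> [0.5, 0.5] and _setup_number_or_seq((1, 3), "sigma") -> [1.0, 3.0].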
def _check_fill_arg(fill: Union[_FillType, Dict[Union[Type, str], _FillType]]) -> None:
    if isinstance(fill, dict):
        for value in fill.values():
            _check_fill_arg(value)
    else:
        if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
            raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
def _convert_fill_arg(fill: _FillType) -> _FillTypeJIT:
    # A fill of 0 is not equivalent to None, so None must be preserved rather than coerced to a number.
    if fill is None:
        return fill

    if not isinstance(fill, (int, float)):
        fill = [float(v) for v in list(fill)]
    return fill  # type: ignore[return-value]
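# Example: _convert_fill_arg((0, 127, 255)) -> [0.0, 127.0, 255.0]; None and plain numbers pass through unchanged.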
def _setup_fill_arg(fill: Union[_FillType, Dict[Union[Type, str], _FillType]]) -> Dict[Union[Type, str], _FillTypeJIT]:
    _check_fill_arg(fill)

    if isinstance(fill, dict):
        for k, v in fill.items():
            fill[k] = _convert_fill_arg(v)
        return fill  # type: ignore[return-value]

    return {"others": _convert_fill_arg(fill)}
def _get_fill(fill_dict, inpt_type):
    if inpt_type in fill_dict:
        return fill_dict[inpt_type]
    elif "others" in fill_dict:
        return fill_dict["others"]
    else:
        raise RuntimeError("This should never happen, please open an issue on the torchvision repo if you hit this.")
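# Example: _setup_fill_arg({tv_tensors.Image: 127, "others": 0}) validates and converts each value, and
# _get_fill(fill_dict, type(inpt)) then resolves the fill for a concrete input type, falling back to "others".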
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
    err_msg = f"Padding must be an int or a 1, 2, or 4 element tuple or list, got {padding}."
    if isinstance(padding, (tuple, list)):
        if len(padding) not in [1, 2, 4] or not all(isinstance(p, int) for p in padding):
            raise ValueError(err_msg)
    elif not isinstance(padding, int):
        raise ValueError(err_msg)
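# Example: _check_padding_arg(2) and _check_padding_arg([1, 2, 3, 4]) pass; _check_padding_arg([1, 2, 3])
# raises ValueError, since only 1-, 2-, or 4-element sequences of ints are accepted.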
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")


def _find_labels_default_heuristic(inputs: Any) -> torch.Tensor:
    """
    This heuristic covers three cases:

    1. The input is a tuple or list whose second item is a labels tensor. This happens for already batched
       classification inputs for MixUp and CutMix (typically after the DataLoader).
    2. The input is a tuple or list whose second item is a dictionary that contains the labels tensor
       under a label-like (see below) key. This happens for the inputs of detection models.
    3. The input is a dictionary that is structured as the one from 2.

    What is a "label-like" key? We first search for a case-insensitive match of 'labels' inside the keys of the
    dictionary. This is the name our detection models expect. If we can't find that, we look for a case-insensitive
    match of the term 'label' anywhere inside the key, i.e. 'FooLaBeLBar'. If we can't find that either, the dictionary
    contains no "label-like" key.
    """

    if isinstance(inputs, (tuple, list)):
        inputs = inputs[-1]

    # MixUp, CutMix
    if is_pure_tensor(inputs):
        return inputs

    if not isinstance(inputs, collections.abc.Mapping):
        raise ValueError(
            f"When using the default labels_getter, the input passed to forward must be a dictionary or a "
            f"two-tuple whose second item is a dictionary or a tensor, but got {inputs} instead."
        )

    candidate_key = None
    with suppress(StopIteration):
        candidate_key = next(key for key in inputs.keys() if key.lower() == "labels")
    if candidate_key is None:
        with suppress(StopIteration):
            candidate_key = next(key for key in inputs.keys() if "label" in key.lower())
    if candidate_key is None:
        raise ValueError(
            "Could not infer where the labels are in the sample. Try passing a callable as the labels_getter "
            "parameter? If there are no labels in the sample by design, pass labels_getter=None."
        )

    return inputs[candidate_key]


def _parse_labels_getter(labels_getter: Union[str, Callable[[Any], Any], None]) -> Callable[[Any], Any]:
    if labels_getter == "default":
        return _find_labels_default_heuristic
    elif callable(labels_getter):
        return labels_getter
    elif labels_getter is None:
        return lambda _: None
    else:
        raise ValueError(f"labels_getter should either be 'default', a callable, or None, but got {labels_getter}.")


def get_bounding_boxes(flat_inputs: List[Any]) -> tv_tensors.BoundingBoxes:
    """Return the Bounding Boxes in the input.

    Assumes only one ``BoundingBoxes`` object is present.
    """
    # Assumes a single BoundingBoxes entry per sample, as per the docstring above.
    try:
        return next(inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.BoundingBoxes))
    except StopIteration:
        raise ValueError("No bounding boxes were found in the sample")


def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
    """Return Channel, Height, and Width."""
    chws = {
        tuple(get_dimensions(inpt))
        for inpt in flat_inputs
        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video))
    }
    if not chws:
        raise TypeError("No image or video was found in the sample")
    elif len(chws) > 1:
        raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
    c, h, w = chws.pop()
    return c, h, w


def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
    """Return Height and Width."""
    sizes = {
        tuple(get_size(inpt))
        for inpt in flat_inputs
        if check_type(
            inpt,
            (
                is_pure_tensor,
                tv_tensors.Image,
                PIL.Image.Image,
                tv_tensors.Video,
                tv_tensors.Mask,
                tv_tensors.BoundingBoxes,
            ),
        )
    }
    if not sizes:
        raise TypeError("No image, video, mask or bounding box was found in the sample")
    elif len(sizes) > 1:
        raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
    h, w = sizes.pop()
    return h, w


def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
    # Each entry is either a type (tested with isinstance) or a predicate (called on obj).
    for type_or_check in types_or_checks:
        if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
            return True
    return False


def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
    for inpt in flat_inputs:
        if check_type(inpt, types_or_checks):
            return True
    return False


def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
    # Every type/check must be satisfied by at least one input.
    for type_or_check in types_or_checks:
        for inpt in flat_inputs:
            if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
                break
        else:
            return False
    return True
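# Example: for flat_inputs = [tv_tensors.Image(torch.rand(3, 32, 32)), tv_tensors.Mask(torch.zeros(32, 32))],
# query_size(flat_inputs) -> (32, 32), has_any(flat_inputs, tv_tensors.Mask) -> True, and
# has_all(flat_inputs, tv_tensors.Image, tv_tensors.BoundingBoxes) -> False.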