"""
This file is part of the private API. Please do not use directly these classes as they will be modified on
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import Optional, Tuple, Union

import torch
from torch import nn, Tensor

from . import functional as F, InterpolationMode


__all__ = ["ObjectDetection", "ImageClassification", "VideoClassification", "SemanticSegmentation", "OpticalFlow"]


class ObjectDetection(nn.Module):
    def forward(self, img: Tensor) -> Tensor:
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        return F.convert_image_dtype(img, torch.float)

    def __repr__(self) -> str:
        return self.__class__.__name__ + "()"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            "The images are rescaled to ``[0.0, 1.0]``."
        )


class ImageClassification(nn.Module):
    def __init__(
        self,
        *,
        crop_size: int,
        resize_size: int = 256,
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias: Optional[bool] = True,
    ) -> None:
        super().__init__()
        self.crop_size = [crop_size]
        self.resize_size = [resize_size]
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img: Tensor) -> Tensor:
        img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
        img = F.center_crop(img, self.crop_size)
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        img = F.normalize(img, mean=self.mean, std=self.std)
        return img

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    crop_size={self.crop_size}"
        format_string += f"\n    resize_size={self.resize_size}"
        format_string += f"\n    mean={self.mean}"
        format_string += f"\n    std={self.std}"
        format_string += f"\n    interpolation={self.interpolation}"
        format_string += "\n)"
        return format_string

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``."
        )


class VideoClassification(nn.Module):
    def __init__(
        self,
        *,
        crop_size: Tuple[int, int],
        resize_size: Union[Tuple[int], Tuple[int, int]],
        mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645),
        std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        self.crop_size = list(crop_size)
        self.resize_size = list(resize_size)
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation

    def forward(self, vid: Tensor) -> Tensor:
        need_squeeze = False
        if vid.ndim < 5:
            vid = vid.unsqueeze(dim=0)
            need_squeeze = True

        N, T, C, H, W = vid.shape
        vid = vid.view(-1, C, H, W)
        vid = F.resize(vid, self.resize_size, interpolation=self.interpolation, antialias=True)
        vid = F.center_crop(vid, self.crop_size)
        vid = F.convert_image_dtype(vid, torch.float)
        vid = F.normalize(vid, mean=self.mean, std=self.std)
        H, W = self.crop_size
        vid = vid.view(N, T, C, H, W)
        vid = vid.permute(0, 2, 1, 3, 4)  # (N, T, C, H, W) => (N, C, T, H, W)

        if need_squeeze:
            vid = vid.squeeze(dim=0)
        return vid

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    crop_size={self.crop_size}"
        format_string += f"\n    resize_size={self.resize_size}"
        format_string += f"\n    mean={self.mean}"
        format_string += f"\n    std={self.std}"
        format_string += f"\n    interpolation={self.interpolation}"
        format_string += "\n)"
        return format_string

    def describe(self) -> str:
        return (
            "Accepts batched ``(B, T, C, H, W)`` and single ``(T, C, H, W)`` video frame ``torch.Tensor`` objects. "
            f"The frames are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``. Finally the output "
            "dimensions are permuted to ``(..., C, T, H, W)`` tensors."
        )


class SemanticSegmentation(nn.Module):
    def __init__(
        self,
        *,
        resize_size: Optional[int],
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias: Optional[bool] = True,
    ) -> None:
        super().__init__()
        self.resize_size = [resize_size] if resize_size is not None else None
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img: Tensor) -> Tensor:
        if isinstance(self.resize_size, list):
            img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        img = F.normalize(img, mean=self.mean, std=self.std)
        return img

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    resize_size={self.resize_size}"
        format_string += f"\n    mean={self.mean}"
        format_string += f"\n    std={self.std}"
        format_string += f"\n    interpolation={self.interpolation}"
        format_string += "\n)"
        return format_string

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
            f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
            f"``std={self.std}``."
        )


class OpticalFlow(nn.Module):
    def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]:
        if not isinstance(img1, Tensor):
            img1 = F.pil_to_tensor(img1)
        if not isinstance(img2, Tensor):
            img2 = F.pil_to_tensor(img2)

        img1 = F.convert_image_dtype(img1, torch.float)
        img2 = F.convert_image_dtype(img2, torch.float)

        # map [0, 1] into [-1, 1]
        img1 = F.normalize(img1, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        img2 = F.normalize(img2, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

        img1 = img1.contiguous()
        img2 = img2.contiguous()

        return img1, img2

    def __repr__(self) -> str:
        return self.__class__.__name__ + "()"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            "The images are rescaled to ``[-1.0, 1.0]``."
        )
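

# Illustrative usage sketch (editor's addition, not part of the torchvision API): these presets are
# normally obtained through the ``transforms()`` method of a ``Weights`` enum entry, but the minimal
# example below, guarded so it never runs on import, only assumes the ``ImageClassification`` class
# defined above and applies it to a hypothetical float batch.
if __name__ == "__main__":
    # Build the preset with the ImageNet defaults declared in this file (resize to 256, crop to 224).
    preset = ImageClassification(crop_size=224)
    # Dummy batch of four RGB images with arbitrary spatial size; already float in [0, 1], so the
    # dtype-conversion step is a no-op (uint8 or PIL inputs would additionally be rescaled).
    dummy = torch.rand(4, 3, 300, 400)
    out = preset(dummy)
    print(preset)            # multi-line repr listing crop_size, resize_size, mean, std, interpolation
    print(preset.describe()) # human-readable summary of the pipeline
    print(out.shape)         # torch.Size([4, 3, 224, 224])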