import warnings

import torch

warnings.warn(
    "The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the "
    "future. Please use the 'torchvision.transforms.functional' module instead."
)


def _is_tensor_video_clip(clip):
    if not torch.is_tensor(clip):
        raise TypeError("clip should be Tensor. Got %s" % type(clip))
    if not clip.ndimension() == 4:
        raise ValueError("clip should be 4D. Got %dD" % clip.dim())
    return True


def crop(clip, i, j, h, w):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
    """
    if len(clip.size()) != 4:
        raise ValueError("clip should be a 4D tensor.")
    return clip[..., i : i + h, j : j + w]


def resize(clip, target_size, interpolation_mode):
    if len(target_size) != 2:
        raise ValueError(f"target size should be tuple (height, width), instead got {target_size}")
    return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False)


def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing to the video clip.
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j), i.e. coordinates of the upper left corner.
        j (int): j in (i,j), i.e. coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    clip = crop(clip, i, j, h, w)
    clip = resize(clip, size, interpolation_mode)
    return clip


def center_crop(clip, crop_size):
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    h, w = clip.size(-2), clip.size(-1)
    th, tw = crop_size
    if h < th or w < tw:
        raise ValueError("height and width must be no smaller than crop_size")

    i = int(round((h - th) / 2.0))
    j = int(round((w - tw) / 2.0))
    return crop(clip, i, j, th, tw)


def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide value by 255.0 and
    permute the dimensions of clip tensor.
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    _is_tensor_video_clip(clip)
    if not clip.dtype == torch.uint8:
        raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
    return clip.float().permute(3, 0, 1, 2) / 255.0


def normalize(clip, mean, std, inplace=False):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
    Returns:
        normalized clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    if not inplace:
        clip = clip.clone()
    mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
    std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
    clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
    return clip


def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be flipped. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    return clip.flip(-1)
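

# Minimal usage sketch (not part of the original module; the shapes, crop offsets, and
# normalization statistics below are illustrative assumptions). It exercises the
# deprecated helpers above on a synthetic uint8 clip to show the expected layouts and
# dtypes; new code should use 'torchvision.transforms.functional' instead.
if __name__ == "__main__":
    # Fake video clip in the (T, H, W, C) uint8 layout expected by to_tensor.
    raw = torch.randint(0, 256, (16, 128, 171, 3), dtype=torch.uint8)

    clip = to_tensor(raw)  # float tensor of shape (C, T, H, W), values in [0, 1]
    clip = resized_crop(clip, i=8, j=8, h=112, w=112, size=(112, 112))
    clip = normalize(clip, mean=(0.43216, 0.394666, 0.37645), std=(0.22803, 0.22145, 0.216989))
    clip = hflip(clip)

    print(clip.shape)  # torch.Size([3, 16, 112, 112])
    print(clip.dtype)  # torch.float32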