import warnings
from typing import Callable, List, Optional, Sequence, Tuple, Union

import torch
from torch import Tensor

from ..utils import _log_api_usage_once, _make_ntuple


interpolate = torch.nn.functional.interpolate


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed

    Args:
        num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
        eps (float): a value added to the denominator for numerical stability. Default: 1e-5
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
    ):
        super().__init__()
        _log_api_usage_once(self)
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

    def _load_from_state_dict(
        self,
        state_dict: dict,
        prefix: str,
        local_metadata: dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ):
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: Tensor) -> Tensor:
        # move reshapes to the beginning of the op to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"
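
# Illustrative usage sketch (not part of the original module): FrozenBatchNorm2d is a
# drop-in replacement for ``nn.BatchNorm2d`` when the statistics and affine parameters
# must stay fixed, e.g. in detection backbones. The sizes below are assumptions.
#
# >>> bn = FrozenBatchNorm2d(16)
# >>> y = bn(torch.rand(2, 16, 8, 8))  # uses the frozen buffers, never updates them
# >>> y.shape
# torch.Size([2, 16, 8, 8])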


class ConvNormActivation(torch.nn.Sequential):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, ...]] = 3,
        stride: Union[int, Tuple[int, ...]] = 1,
        padding: Optional[Union[int, Tuple[int, ...], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, ...]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
        conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
    ) -> None:

        if padding is None:
            if isinstance(kernel_size, int) and isinstance(dilation, int):
                padding = (kernel_size - 1) // 2 * dilation
            else:
                _conv_dim = len(kernel_size) if isinstance(kernel_size, Sequence) else len(dilation)
                kernel_size = _make_ntuple(kernel_size, _conv_dim)
                dilation = _make_ntuple(dilation, _conv_dim)
                padding = tuple((kernel_size[i] - 1) // 2 * dilation[i] for i in range(_conv_dim))
        if bias is None:
            bias = norm_layer is None

        layers = [
            conv_layer(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )
        ]

        if norm_layer is not None:
            layers.append(norm_layer(out_channels))

        if activation_layer is not None:
            params = {} if inplace is None else {"inplace": inplace}
            layers.append(activation_layer(**params))
        super().__init__(*layers)
        _log_api_usage_once(self)
        self.out_channels = out_channels

        if self.__class__ == ConvNormActivation:
            warnings.warn(
                "Don't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead."
            )


class Conv2dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution2d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size: (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.

    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]] = 3,
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Optional[Union[int, Tuple[int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:

        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv2d,
        )


class Conv3dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution3d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input video.
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size: (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int, int]] = 3,
        stride: Union[int, Tuple[int, int, int]] = 1,
        padding: Optional[Union[int, Tuple[int, int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:

        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv3d,
        )
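
# Illustrative usage sketch (not part of the original module): the 3d variant is
# typically used in video models, where kernel_size and stride may be given per
# dimension (T, H, W). The shapes below are assumptions for the demo.
#
# >>> block = Conv3dNormActivation(3, 16, kernel_size=(1, 3, 3), stride=(1, 2, 2))
# >>> block(torch.rand(1, 3, 8, 112, 112)).shape
# torch.Size([1, 16, 8, 56, 56])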


class SqueezeExcitation(torch.nn.Module):
    """
    This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
    Parameters ``activation`` and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.

    Args:
        input_channels (int): Number of channels in the input image
        squeeze_channels (int): Number of squeeze channels
        activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
        scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
    """

    def __init__(
        self,
        input_channels: int,
        squeeze_channels: int,
        activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
        self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
        self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
        self.activation = activation()
        self.scale_activation = scale_activation()

    def _scale(self, input: Tensor) -> Tensor:
        scale = self.avgpool(input)
        scale = self.fc1(scale)
        scale = self.activation(scale)
        scale = self.fc2(scale)
        return self.scale_activation(scale)

    def forward(self, input: Tensor) -> Tensor:
        scale = self._scale(input)
        return scale * input
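
# Illustrative usage sketch (not part of the original module): channel-wise
# reweighting with an assumed squeeze ratio of 4. Each channel of the input is
# multiplied by a gate in (0, 1), so the output shape equals the input shape.
#
# >>> se = SqueezeExcitation(input_channels=64, squeeze_channels=16)
# >>> se(torch.rand(2, 64, 7, 7)).shape
# torch.Size([2, 64, 7, 7])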
zSqueezeExcitation.forward)r7   r:   r;   r<   r   r^   r`   Sigmoidr=   r   rb   r   r   rv   r4   rB   r   r   r   r   rk      s"    rk   c                       sv   e Zd ZdZdejjdddfdedee de	e
dejjf  d	e	e
dejjf  d
e	e dedef fddZ  ZS )MLPa  This block implements the multi-layer perceptron (MLP) module.

    Args:
        in_channels (int): Number of channels of the input
        hidden_channels (List[int]): List of the hidden channel dimensions
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
            Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
        bias (bool): Whether to use bias in the linear layer. Default ``True``
        dropout (float): The probability for the dropout layer. Default: 0.0
    """

    def __init__(
        self,
        in_channels: int,
        hidden_channels: List[int],
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        inplace: Optional[bool] = None,
        bias: bool = True,
        dropout: float = 0.0,
    ):
        params = {} if inplace is None else {"inplace": inplace}

        layers = []
        in_dim = in_channels
        for hidden_dim in hidden_channels[:-1]:
            layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
            if norm_layer is not None:
                layers.append(norm_layer(hidden_dim))
            layers.append(activation_layer(**params))
            layers.append(torch.nn.Dropout(dropout, **params))
            in_dim = hidden_dim

        layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))
        layers.append(torch.nn.Dropout(dropout, **params))

        super().__init__(*layers)
        _log_api_usage_once(self)
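
# Illustrative usage sketch (not part of the original module): a two-layer
# classification head. Note that the final Linear is followed by Dropout but by
# no activation. The sizes and the GELU choice are assumptions for the demo.
#
# >>> head = MLP(in_channels=512, hidden_channels=[256, 10], activation_layer=torch.nn.GELU, dropout=0.1)
# >>> head(torch.rand(4, 512)).shape
# torch.Size([4, 10])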


class Permute(torch.nn.Module):
    """This module returns a view of the tensor input with its dimensions permuted.

    Args:
        dims (List[int]): The desired ordering of dimensions
    """

    def __init__(self, dims: List[int]):
        super().__init__()
        self.dims = dims

    def forward(self, x: Tensor) -> Tensor:
        return torch.permute(x, self.dims)