import copy
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

import torch
from torch import nn, Tensor
from torchvision.ops import StochasticDepth

from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "EfficientNet",
    "EfficientNet_B0_Weights",
    "EfficientNet_B1_Weights",
    "EfficientNet_B2_Weights",
    "EfficientNet_B3_Weights",
    "EfficientNet_B4_Weights",
    "EfficientNet_B5_Weights",
    "EfficientNet_B6_Weights",
    "EfficientNet_B7_Weights",
    "EfficientNet_V2_S_Weights",
    "EfficientNet_V2_M_Weights",
    "EfficientNet_V2_L_Weights",
    "efficientnet_b0",
    "efficientnet_b1",
    "efficientnet_b2",
    "efficientnet_b3",
    "efficientnet_b4",
    "efficientnet_b5",
    "efficientnet_b6",
    "efficientnet_b7",
    "efficientnet_v2_s",
    "efficientnet_v2_m",
    "efficientnet_v2_l",
]
   @   st   e Zd ZU eed< eed< eed< eed< eed< eed< edejf ed< e	dd
edede
e defddZd	S )_MBConvConfigexpand_ratiokernelstrideinput_channelsout_channels
num_layers.blockNchannels
width_mult	min_valuereturnc                 C   s   t | | d|S )N   )r   )r<   r=   r>    rA   S/var/www/vscode/kcb/lib/python3.10/site-packages/torchvision/models/efficientnet.pyadjust_channels8   s   z_MBConvConfig.adjust_channelsN)__name__
__module____qualname__float__annotations__intr   r   Modulestaticmethodr   rC   rA   rA   rA   rB   r4   .   s   
 &r4   c                       sr   e Zd Z			ddedededededed	ed
edeedejf  ddf fddZ	e
ded
efddZ  ZS )MBConvConfig      ?Nr5   r6   r7   r8   r9   r:   r=   
depth_multr;   .r?   c
           
   	      sL   |  ||}|  ||}| ||}|	d u rt}	t |||||||	 d S rD   )rC   adjust_depthMBConvsuper__init__)
selfr5   r6   r7   r8   r9   r:   r=   rO   r;   	__class__rA   rB   rS   ?   s   zMBConvConfig.__init__c                 C   s   t t| | S rD   )rJ   mathceil)r:   rO   rA   rA   rB   rP   R   s   zMBConvConfig.adjust_depth)rN   rN   N)rE   rF   rG   rH   rJ   r   r   r   rK   rS   rL   rP   __classcell__rA   rA   rU   rB   rM   =   s6    
	

class FusedMBConvConfig(_MBConvConfig):
    # Stores the stage configuration of the FusedMBConv blocks (EfficientNet V2)
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        if block is None:
            block = FusedMBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)

class MBConv(nn.Module):
    def __init__(
        self,
        cnf: MBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = SqueezeExcitation,
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        # expand
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        # depthwise
        layers.append(
            Conv2dNormActivation(
                expanded_channels,
                expanded_channels,
                kernel_size=cnf.kernel,
                stride=cnf.stride,
                groups=expanded_channels,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
            )
        )

        # squeeze and excitation
        squeeze_channels = max(1, cnf.input_channels // 4)
        layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))

        # project
        layers.append(
            Conv2dNormActivation(
                expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result = self.stochastic_depth(result)
            result += input
        return result


class FusedMBConv(nn.Module):
    def __init__(
        self,
        cnf: FusedMBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            # fused expand
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )
            # project
            layers.append(
                Conv2dNormActivation(
                    expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
                )
            )
        else:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.out_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result = self.stochastic_depth(result)
            result += input
        return result

class EfficientNet(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
        dropout: float,
        stochastic_depth_prob: float = 0.2,
        num_classes: int = 1000,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        last_channel: Optional[int] = None,
    ) -> None:
        """
        EfficientNet V1 and V2 main class

        Args:
            inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The dropout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            last_channel (int): The number of channels on the penultimate layer
        """
        super().__init__()
        _log_api_usage_once(self)

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, _MBConvConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        layers: List[nn.Module] = []

        # building first layer
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
            )
        )

        # building inverted residual blocks
        total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
        stage_block_id = 0
        for cnf in inverted_residual_setting:
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                # copy to avoid modifications. shallow copy is enough
                block_cnf = copy.copy(cnf)

                # overwrite info if not the first conv in the stage
                if stage:
                    block_cnf.input_channels = block_cnf.out_channels
                    block_cnf.stride = 1

                # adjust stochastic depth probability based on the depth of the stage block
                sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks

                stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
                stage_block_id += 1

            layers.append(nn.Sequential(*stage))

        # building last several layers
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = last_channel if last_channel is not None else 4 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.SiLU,
            )
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(lastconv_output_channels, num_classes),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init_range = 1.0 / math.sqrt(m.out_features)
                nn.init.uniform_(m.weight, -init_range, init_range)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)

def _efficientnet(
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
    dropout: float,
    last_channel: Optional[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> EfficientNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = EfficientNet(inverted_residual_setting, dropout, last_channel=last_channel, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model

def _efficientnet_conf(
    arch: str,
    **kwargs: Any,
) -> Tuple[Sequence[Union[MBConvConfig, FusedMBConvConfig]], Optional[int]]:
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]]
    if arch.startswith("efficientnet_b"):
        bneck_conf = partial(MBConvConfig, width_mult=kwargs.pop("width_mult"), depth_mult=kwargs.pop("depth_mult"))
        inverted_residual_setting = [
            bneck_conf(1, 3, 1, 32, 16, 1),
            bneck_conf(6, 3, 2, 16, 24, 2),
            bneck_conf(6, 5, 2, 24, 40, 2),
            bneck_conf(6, 3, 2, 40, 80, 3),
            bneck_conf(6, 5, 1, 80, 112, 3),
            bneck_conf(6, 5, 2, 112, 192, 4),
            bneck_conf(6, 3, 1, 192, 320, 1),
        ]
        last_channel = None
    elif arch.startswith("efficientnet_v2_s"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 2),
            FusedMBConvConfig(4, 3, 2, 24, 48, 4),
            FusedMBConvConfig(4, 3, 2, 48, 64, 4),
            MBConvConfig(4, 3, 2, 64, 128, 6),
            MBConvConfig(6, 3, 1, 128, 160, 9),
            MBConvConfig(6, 3, 2, 160, 256, 15),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_m"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 3),
            FusedMBConvConfig(4, 3, 2, 24, 48, 5),
            FusedMBConvConfig(4, 3, 2, 48, 80, 5),
            MBConvConfig(4, 3, 2, 80, 160, 7),
            MBConvConfig(6, 3, 1, 160, 176, 14),
            MBConvConfig(6, 3, 2, 176, 304, 18),
            MBConvConfig(6, 3, 1, 304, 512, 5),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_l"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 32, 32, 4),
            FusedMBConvConfig(4, 3, 2, 32, 64, 7),
            FusedMBConvConfig(4, 3, 2, 64, 96, 7),
            MBConvConfig(4, 3, 2, 96, 192, 10),
            MBConvConfig(6, 3, 1, 192, 224, 19),
            MBConvConfig(6, 3, 2, 224, 384, 25),
            MBConvConfig(6, 3, 1, 384, 640, 7),
        ]
        last_channel = 1280
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel
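
# Illustrative sketch, added for exposition and not part of the original module:
# the same seven-stage V1 template produces every B-variant; only the multipliers
# differ. `_demo_conf_scaling` is a hypothetical name, and the expected depths
# follow from `adjust_depth` above.
def _demo_conf_scaling() -> None:
    b0, _ = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
    b2, _ = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
    assert [c.num_layers for c in b0] == [1, 2, 2, 3, 3, 4, 1]
    assert [c.num_layers for c in b2] == [2, 3, 3, 4, 4, 5, 2]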

_COMMON_META: Dict[str, Any] = {
    "categories": _IMAGENET_CATEGORIES,
}


_COMMON_META_V1 = {
    **_COMMON_META,
    "min_size": (1, 1),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1",
}


_COMMON_META_V2 = {
    **_COMMON_META,
    "min_size": (33, 33),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2",
}


class EfficientNet_B0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b0_rwightman-7f5810bc.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=256, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 5288548,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.692,
                    "acc@5": 93.532,
                }
            },
            "_ops": 0.386,
            "_file_size": 20.451,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
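
# Illustrative sketch, added for exposition and not part of the original module:
# each enum entry bundles the checkpoint URL with its preprocessing recipe and
# metadata, so both can be inspected without instantiating a model.
def _demo_weight_metadata() -> None:
    weights = EfficientNet_B0_Weights.DEFAULT  # alias of IMAGENET1K_V1
    preprocess = weights.transforms()  # 256px resize, 224px center crop, bicubic
    print(preprocess)
    print(weights.meta["_metrics"]["ImageNet-1K"]["acc@1"])  # 77.692
    print(len(weights.meta["categories"]))  # 1000 ImageNet classes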

class EfficientNet_B1_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1_rwightman-bac287d4.pth",
        transforms=partial(
            ImageClassification, crop_size=240, resize_size=256, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.642,
                    "acc@5": 94.186,
                }
            },
            "_ops": 0.687,
            "_file_size": 30.134,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth",
        transforms=partial(
            ImageClassification, crop_size=240, resize_size=255, interpolation=InterpolationMode.BILINEAR
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.838,
                    "acc@5": 94.934,
                }
            },
            "_ops": 0.687,
            "_file_size": 30.136,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2

class EfficientNet_B2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b2_rwightman-c35c1473.pth",
        transforms=partial(
            ImageClassification, crop_size=288, resize_size=288, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 9110994,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.608,
                    "acc@5": 95.310,
                }
            },
            "_ops": 1.088,
            "_file_size": 35.174,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
   @   r   )r!   zJhttps://download.pytorch.org/models/efficientnet_b3_rwightman-b3899882.pthi,  r   r   i r   gnT@g~jtX@r   gZd;?gd;OG@r   r   r   Nr   rA   rA   rA   rB   r!     r   r!   c                
   @   r   )r"   zJhttps://download.pytorch.org/models/efficientnet_b4_rwightman-23ab8bcd.pthi|  r   r   i0!'r   gjtT@gt&X@r   g~jt@gKR@r   r   r   Nr   rA   rA   rA   rB   r"   /  r   r"   c                
   @   r   )r#   zJhttps://download.pytorch.org/models/efficientnet_b5_lukemelas-1a07897c.pthi  r   ir   g#~jT@gx&1(X@r   gx&1$@gK7]@r   r   r   Nr   rA   rA   rA   rB   r#   G  r   r#   c                
   @   r   )r$   zJhttps://download.pytorch.org/models/efficientnet_b6_lukemelas-24a108a5.pthi  r   ir   gn U@gv:X@r   g rh3@g$d@r   r   r   Nr   rA   rA   rA   rB   r$   _  r   r$   c                
   @   r   )r%   zJhttps://download.pytorch.org/models/efficientnet_b7_lukemelas-c5b4e57e.pthiX  r   icr   g+U@g'1:X@r   gsh|B@go@r   r   r   Nr   rA   rA   rA   rB   r%   w  r   r%   c                
   @   r   )r&   zBhttps://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pthr   r   i8nGr   g;OU@gx&18X@r   gZd @gVT@r   r   r   NrE   rF   rG   r   r   r   r   r   _COMMON_META_V2r   r   rA   rA   rA   rB   r&     0    r&   c                
   @   r   )r'   zBhttps://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth  r   i:r   gI+GU@gDlIX@r   gE8@gQ j@r   r   r   Nr  rA   rA   rA   rB   r'     r  r'   c                
   @   sN   e Zd Zedeeddejdddi eddddd	id
ddddZ	e	Z
dS )r(   zBhttps://download.pytorch.org/models/efficientnet_v2_l-59c71312.pthr  )      ?r  r  )r   r   r   meanstdiHfr   gʡEsU@gOnrX@r   g
ףp=
L@gI+i|@r   r   r   N)rE   rF   rG   r   r   r   r   r   r  r   r   rA   rA   rA   rB   r(     s4    r(   

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B0_Weights.IMAGENET1K_V1))
def efficientnet_b0(
    *, weights: Optional[EfficientNet_B0_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B0_Weights
        :members:
    """
    weights = EfficientNet_B0_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs)

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B1_Weights.IMAGENET1K_V1))
def efficientnet_b1(
    *, weights: Optional[EfficientNet_B1_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B1_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B1_Weights
        :members:
    """
    weights = EfficientNet_B1_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b1", width_mult=1.0, depth_mult=1.1)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs)
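
# Illustrative usage sketch, added for exposition and not part of the original
# module: end-to-end inference with pretrained weights, mirroring the pattern in
# the torchvision documentation. Running it downloads the checkpoint; `img` is an
# assumed 3xHxW image tensor.
def _demo_pretrained_inference(img: Tensor) -> str:
    weights = EfficientNet_B1_Weights.IMAGENET1K_V2
    model = efficientnet_b1(weights=weights)
    model.eval()
    batch = weights.transforms()(img).unsqueeze(0)  # resize, crop, normalize, add batch dim
    with torch.no_grad():
        probs = model(batch).squeeze(0).softmax(0)
    class_id = int(probs.argmax())
    return weights.meta["categories"][class_id]  # human-readable ImageNet label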

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B2_Weights.IMAGENET1K_V1))
def efficientnet_b2(
    *, weights: Optional[EfficientNet_B2_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B2_Weights
        :members:
    """
    weights = EfficientNet_B2_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.3), last_channel, weights, progress, **kwargs)

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B3_Weights.IMAGENET1K_V1))
def efficientnet_b3(
    *, weights: Optional[EfficientNet_B3_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B3_Weights
        :members:
    """
    weights = EfficientNet_B3_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b3", width_mult=1.2, depth_mult=1.4)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.3), last_channel, weights, progress, **kwargs)

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(
    *, weights: Optional[EfficientNet_B4_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B4_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B4_Weights
        :members:
    """
    weights = EfficientNet_B4_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b4", width_mult=1.4, depth_mult=1.8)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.4), last_channel, weights, progress, **kwargs)

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B5_Weights.IMAGENET1K_V1))
def efficientnet_b5(
    *, weights: Optional[EfficientNet_B5_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B5_Weights
        :members:
    """
    weights = EfficientNet_B5_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b5", width_mult=1.6, depth_mult=2.2)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(
    *, weights: Optional[EfficientNet_B6_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B6_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B6_Weights
        :members:
    """
    weights = EfficientNet_B6_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b6", width_mult=1.8, depth_mult=2.6)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.5),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B7_Weights.IMAGENET1K_V1))
def efficientnet_b7(
    *, weights: Optional[EfficientNet_B7_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B7_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B7_Weights
        :members:
    """
    weights = EfficientNet_B7_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b7", width_mult=2.0, depth_mult=3.1)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.5),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_S_Weights.IMAGENET1K_V1))
def efficientnet_v2_s(
    *, weights: Optional[EfficientNet_V2_S_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-S architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_S_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
        :members:
    """
    weights = EfficientNet_V2_S_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_s")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.2),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_M_Weights.IMAGENET1K_V1))
def efficientnet_v2_m(
    *, weights: Optional[EfficientNet_V2_M_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-M architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
        :members:
    """
    weights = EfficientNet_V2_M_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_m")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.3),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )

@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_L_Weights.IMAGENET1K_V1))
def efficientnet_v2_l(
    *, weights: Optional[EfficientNet_V2_L_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-L architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
        :members:
    """
    weights = EfficientNet_V2_L_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_l")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )
 (C=r

80""###$$