import warnings
from functools import partial
from typing import Any, Optional, Union

import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import functional as F

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from ..googlenet import BasicConv2d, GoogLeNet, GoogLeNet_Weights, GoogLeNetOutputs, Inception, InceptionAux
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableGoogLeNet",
    "GoogLeNet_QuantizedWeights",
    "googlenet",
]


class QuantizableBasicConv2d(BasicConv2d):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)


class QuantizableInception(Inception):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.cat.cat(outputs, 1)


class QuantizableInceptionAux(InceptionAux):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        x = F.adaptive_avg_pool2d(x, (4, 4))
        x = self.conv(x)
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x


class QuantizableGoogLeNet(GoogLeNet):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(
            *args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
        )
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> GoogLeNetOutputs:
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux1, aux2 = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        else:
            return self.eager_outputs(x, aux2, aux1)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in googlenet model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        for m in self.modules():
            if type(m) is QuantizableBasicConv2d:
                m.fuse_model(is_qat)


class GoogLeNet_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/googlenet_fbgemm-c81f6644.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 6624904,
            "min_size": (15, 15),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": GoogLeNet_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.826,
                    "acc@5": 89.404,
                }
            },
            "_ops": 1.498,
            "_file_size": 12.618,
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


@register_model(name="quantized_googlenet")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else GoogLeNet_Weights.IMAGENET1K_V1,
    )
)
def googlenet(
    *,
    weights: Optional[Union[GoogLeNet_QuantizedWeights, GoogLeNet_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableGoogLeNet:
    """GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.GoogLeNet_Weights
        :members:
        :noindex:
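
    A minimal usage sketch showing how the quantized weights and ``quantize=True`` fit together
    (illustrative only; it assumes the pretrained FBGEMM checkpoint can be downloaded and that
    inference runs on CPU)::

        from torchvision.models.quantization import GoogLeNet_QuantizedWeights, googlenet

        weights = GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        model = googlenet(weights=weights, quantize=True)
        model.eval()  # quantized models support inference only

        preprocess = weights.transforms()  # eval transforms bundled with the weights
        # batch = preprocess(img).unsqueeze(0)
        # class_id = model(batch).squeeze(0).softmax(0).argmax().item()
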
    """
    weights = (GoogLeNet_QuantizedWeights if quantize else GoogLeNet_Weights).verify(weights)

    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "init_weights", False)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableGoogLeNet(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not original_aux_logits:
            model.aux_logits = False
            model.aux1 = None
            model.aux2 = None
        else:
            warnings.warn(
                "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
            )

    return model