from typing import Optional

import torch
import torch.optim._functional as F
from torch import Tensor
from torch.distributed.optim._deprecation_warning import (
    _scripted_functional_optimizer_deprecation_warning,
)

__all__: list[str] = []


# A TorchScript-compatible functional RMSprop optimizer. Instead of reading
# `param.grad` when updating parameters, it lets the distributed optimizer
# pass gradients explicitly to `step`. This separates gradients from
# parameters, so multithreaded trainers can update parameters without
# racing on accumulation into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals and is
# not meant to be exposed to the user.
@torch.jit.script
class _FunctionalRMSprop:
    def __init__(
        self,
        params: list[Tensor],
        lr: float = 1e-2,
        alpha: float = 0.99,
        eps: float = 1e-8,
        weight_decay: float = 0.0,
        momentum: float = 0.0,
        centered: bool = False,
        foreach: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        _scripted_functional_optimizer_deprecation_warning(stacklevel=2)
        self.defaults = {
            "lr": lr,
            "alpha": alpha,
            "eps": eps,
            "weight_decay": weight_decay,
            "momentum": momentum,
        }
        self.centered = centered
        self.foreach = foreach
        self.maximize = maximize

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: there is only one param_group; adding additional param groups
        # is not supported, as it is not a common use case here.
        self.param_group = {"params": params}

        self.state = torch.jit.annotate(
            dict[torch.Tensor, dict[str, torch.Tensor]], {}
        )

    def step(self, gradients: list[Optional[Tensor]]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        square_avgs = []
        grad_avgs = []
        momentum_buffer_list = []
        state_steps = []
        lr = self.defaults["lr"]
        alpha = self.defaults["alpha"]
        eps = self.defaults["eps"]
        momentum = self.defaults["momentum"]
        weight_decay = self.defaults["weight_decay"]

        if len(params) != len(gradients):
            raise ValueError(
                "the number of gradients passed in does not equal the number of parameters! "
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )

        has_complex = False
        for param, gradient in zip(params, gradients):
            if gradient is not None:
                has_complex |= torch.is_complex(param)
                params_with_grad.append(param)
                grads.append(gradient)
                # Lazy state initialization
                if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state["step"] = torch.tensor(0.0)
                    state["square_avg"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    if momentum > 0:
                        state["momentum_buffer"] = torch.zeros_like(
                            param, memory_format=torch.preserve_format
                        )
                    if self.centered:
                        state["grad_avg"] = torch.zeros_like(
                            param, memory_format=torch.preserve_format
                        )

                state = self.state[param]
                square_avgs.append(state["square_avg"])
                if momentum > 0:
                    momentum_buffer_list.append(state["momentum_buffer"])
                if self.centered:
                    grad_avgs.append(state["grad_avg"])

                state_steps.append(state["step"])

        with torch.no_grad():
            F.rmsprop(
                params_with_grad,
                grads,
                square_avgs,
                grad_avgs,
                momentum_buffer_list,
                state_steps,
                lr=lr,
                alpha=alpha,
                eps=eps,
                momentum=momentum,
                centered=self.centered,
                weight_decay=weight_decay,
                foreach=self.foreach,
                maximize=self.maximize,
                has_complex=has_complex,
            )