from typing import Optional

import torch
import torch.optim._functional as F
from torch import Tensor
from torch.distributed.optim._deprecation_warning import (
    _scripted_functional_optimizer_deprecation_warning,
)

__all__: list[str] = []


# TorchScript-compatible functional Adagrad optimizer, used internally by the
# distributed optimizer. Instead of reading `param.grad`, the caller passes the
# gradients explicitly to `step`, so parameters and gradients stay separate and
# multiple trainer threads can update parameters without accumulating into the
# same `.grad`. Not meant to be exposed to users.
@torch.jit.script
class _FunctionalAdagrad:
    def __init__(
        self,
        params: list[Tensor],
        lr: float = 1e-2,
        lr_decay: float = 0.0,
        weight_decay: float = 0.0,
        initial_accumulator_value: float = 0.0,
        warmup_lr_multiplier: float = 1.0,
        warmup_num_iters: float = 0.0,
        eps: float = 1e-10,
        coalesce_grad: bool = True,
        foreach: bool = False,
        fused: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        _scripted_functional_optimizer_deprecation_warning(stacklevel=2)
        self.defaults = {
            "lr": lr,
            "lr_decay": lr_decay,
            "eps": eps,
            "weight_decay": weight_decay,
            "initial_accumulator_value": initial_accumulator_value,
            "warmup_lr_multiplier": warmup_lr_multiplier,
            "warmup_num_iters": warmup_num_iters,
        }
        self.coalesce_grad = coalesce_grad
        self.foreach = foreach
        self.fused = fused
        self.maximize = maximize
        self.state = torch.jit.annotate(dict[torch.Tensor, dict[str, torch.Tensor]], {})

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # Only a single param_group is supported; adding extra param groups is
        # not a use case for this internal optimizer.
        self.param_group = {"params": params}

        # Initialize per-parameter state: the running sum of squared gradients
        # and a scalar step counter stored as a tensor for TorchScript.
        for p in self.param_group["params"]:
            self.state[p] = {
                "sum": torch.full_like(p.data, initial_accumulator_value),
                "step": torch.tensor(0.0),
            }

    def step(self, gradients: list[Optional[Tensor]]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        state_sums = []
        state_steps: list[Tensor] = []

        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )

        # Collect the parameters that actually received a gradient, together
        # with their state, and record whether any gradient is sparse or any
        # parameter is complex so the kernel can dispatch accordingly.
        has_sparse_grad, has_complex = False, False
        for param, gradient in zip(self.param_group["params"], gradients):
            if gradient is not None:
                has_sparse_grad |= gradient.is_sparse
                has_complex |= torch.is_complex(param)
                params_with_grad.append(param)
                grads.append(gradient)
                state = self.state[param]
                state_sums.append(state["sum"])
                state_steps.append(state["step"])

        with torch.no_grad():
            F.adagrad(
                params_with_grad,
                grads,
                state_sums,
                state_steps,
                lr=self.defaults["lr"],
                weight_decay=self.defaults["weight_decay"],
                lr_decay=self.defaults["lr_decay"],
                eps=self.defaults["eps"],
                has_sparse_grad=has_sparse_grad,
                foreach=self.foreach,
                maximize=self.maximize,
                has_complex=has_complex,
                fused=self.fused,
                grad_scale=None,
                found_inf=None,
            )