from typing import Optional

import torch
import torch.optim._functional as F
from torch import Tensor
from torch.distributed.optim._deprecation_warning import (
    _scripted_functional_optimizer_deprecation_warning,
)

__all__: list[str] = []


# A TorchScript-compatible functional Rprop optimizer. Instead of reading
# `param.grad` when updating parameters, it lets the distributed optimizer
# pass gradients to the `step` function explicitly. Separating gradients
# from parameters this way allows a multithreaded trainer to update
# parameters without races on accumulating into the same `.grad`.
# NOTE: This is meant for distributed optimizer internals only and is not
# exposed to the user.
@torch.jit.script
class _FunctionalRprop:
    def __init__(
        self,
        params: list[Tensor],
        lr: float = 1e-2,
        etas: tuple[float, float] = (0.5, 1.2),
        step_sizes: tuple[float, float] = (1e-6, 50),
        foreach: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        _scripted_functional_optimizer_deprecation_warning(stacklevel=2)
        self.defaults = {
            "lr": lr,
        }
        self.etas = etas
        self.step_sizes = step_sizes
        self.foreach = foreach
        self.maximize = maximize

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: there is only one param_group; adding additional param groups
        # is not supported, as it is not a common use case here.
        self.param_group = {"params": params}

        self.state = torch.jit.annotate(dict[torch.Tensor, dict[str, torch.Tensor]], {})

    def step(self, gradients: list[Optional[Tensor]]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        prevs = []
        step_sizes = []
        state_steps = []
        lr = self.defaults["lr"]
        etaminus, etaplus = self.etas
        step_size_min, step_size_max = self.step_sizes
        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )

        has_complex = False
        for param, gradient in zip(params, gradients):
            if gradient is not None:
                has_complex |= torch.is_complex(param)
                params_with_grad.append(param)
                grads.append(gradient)
                # Lazy state initialization
                if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state["step"] = torch.tensor(0.0)
                    state["prev"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    state["step_size"] = torch.full_like(gradient, lr)

                state = self.state[param]
                prevs.append(state["prev"])
                step_sizes.append(state["step_size"])
                state_steps.append(state["step"])

        with torch.no_grad():
            F.rprop(
                params_with_grad,
                grads,
                prevs,
                step_sizes,
                state_steps,
                step_size_min=step_size_min,
                step_size_max=step_size_max,
                etaminus=etaminus,
                etaplus=etaplus,
                foreach=self.foreach,
                maximize=self.maximize,
                has_complex=has_complex,
            )