"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
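
Example (a minimal sketch mirroring the per-class documentation; it assumes the
RPC framework and distributed autograd have already been initialized on every
participating worker, and the worker name ``"worker1"`` is illustrative)::

    import torch
    import torch.distributed.autograd as dist_autograd
    import torch.distributed.rpc as rpc
    from torch import optim
    from torch.distributed.optim import DistributedOptimizer

    with dist_autograd.context() as context_id:
        # Forward pass: create remote references (RRefs) to tensors owned by
        # another worker.
        rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
        rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
        loss = rref1.to_here() + rref2.to_here()

        # Distributed backward pass: gradients are accumulated under this
        # autograd context on the workers that own the parameters.
        dist_autograd.backward(context_id, [loss.sum()])

        # DistributedOptimizer wraps a local optimizer class (here SGD) and
        # steps it remotely on each parameter's owning worker.
        dist_optim = DistributedOptimizer(optim.SGD, [rref1, rref2], lr=0.05)
        dist_optim.step(context_id)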
"""
import warnings

import torch
from torch import optim

from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim

# DistributedOptimizer depends on the RPC framework, which is not available in
# every build, so only import it when torch._C exposes _rpc_init.
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer

__all__ = [
    "as_functional_optim",
    "DistributedOptimizer",
    "PostLocalSGDOptimizer",
    "ZeroRedundancyOptimizer",
]