o
    0h/                  	   @   s  U d Z ddlZddlZddlmZmZmZmZ ddlZddlm	Z
mZ ddlmZmZmZ ddlmZ ddlmZ ee
eef ZejZejZd	ag aeeeg df ee f  ed
< e Ze  Z!e Z"dd Z#dd Z$de%fddZ&dDddZ'G dd de(Z)de%fddZ*de%fddZ+dEdee ddfddZ,defddZ-defdd Z.dEdee defd!d"Z/dEdee defd#d$Z0	dFd'ee d(ed)eddfd*d+Z1de2eef fd,d-Z3dEdee deeef fd.d/Z4dDd0d1Z5d2efd3d4Z6deddfd5d6Z7G d7d dZ	G d8d9 d9Z8d2ed: de8fd;d2Z9dGdeeeej	f defd=d>Z:	<dGd?edeeeej	f ddfd@dAZ;ddBl<T g dCZ=dS )HzH
This package enables an interface for accessing MTIA backend in python
    N)AnyCallableOptionalUnion)deviceTensor)_dummy_type_LazySeedTrackerclassproperty)Device   )_get_device_indexF_queued_callsc                   C   s
   t   d S N)
_lazy_init r   r   G/var/www/vscode/kcb/lib/python3.10/site-packages/torch/mtia/__init__.pyinit!      
r   c                   C   s   t ot  S )z9Return whether PyTorch's MTIA state has been initialized.)_initialized_is_in_bad_forkr   r   r   r   is_initialized%   s   r   returnc                   C   
   t j S r   )torch_C_mtia_isInBadForkr   r   r   r   r   *   r   r   c                  C   s  t  sttdr
d S tq t  r	 W d    d S t r tdt s'tdtj	
  dt_tdd t D  z1tD ]'\} }z|   W q> tye } zdt| dd	| }t||d }~ww W ttd nttd w daW d    d S 1 sw   Y  d S )
Nis_initializingzwCannot re-initialize MTIA in forked subprocess. To use MTIA with multiprocessing, you must use the 'spawn' start methodzTorch not compiled with MTIA enabled. Ensure you have `import mtia.host_runtime.torch_mtia.dynamic_library` in your python src file and include `//mtia/host_runtime/torch_mtia:torch_mtia` as your target dependency!Tc                 s   s    | ]}|r|V  qd S r   r   ).0callsr   r   r   	<genexpr>P   s    z_lazy_init.<locals>.<genexpr>z6MTIA call failed lazily at initialization with error: z(

MTIA call was originally invoked at:

 )r   hasattr_tls_initialization_lockr   RuntimeError_is_compiledAssertionErrorr   r   
_mtia_initr   r   extend_lazy_seed_tracker	get_calls	ExceptionstrjoinDeferredMtiaCallErrordelattrr   )queued_callorig_tracebackemsgr   r   r   r   .   sB   



"r   c                   @   s   e Zd ZdS )r/   N)__name__
__module____qualname__r   r   r   r   r/   a   s    r/   c                   C   r   )z*Return true if compiled with MTIA support.)r   r   _mtia_isBuiltr   r   r   r   r&   e      
r&   c                   C   s   t  sdS t dkS )z'Return true if MTIA device is availableFr   )r&   device_countr   r   r   r   is_availablej   s   
r;   r   c                 C   s:   t j|  t j W  d   S 1 sw   Y  dS )z?Waits for all jobs in all streams on a MTIA device to complete.N)r   mtiar   r   _mtia_deviceSynchronizer   r   r   r   synchronizer   s   $r?   c                   C   r   )z,Return the number of MTIA devices available.)r   r   _mtia_getDeviceCountr   r   r   r   r:   x      
r:   c                   C   r   )z0Return the index of a currently selected device.)r   r   %_accelerator_hooks_get_current_devicer   r   r   r   current_device~   r9   rC   c                 C      t jt| ddS )aS  Return the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the currently selected :class:`Stream` for the current device, given
            by :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
            (default).
    Toptional)r   r   _mtia_getCurrentStreamr   r>   r   r   r   current_stream      	rH   c                 C   rD   )a=  Return the default :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the default :class:`Stream` for the current device, given by
            :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
            (default).
    TrE   )r   r   _mtia_getDefaultStreamr   r>   r   r   r   default_stream   rI   rK   allpythonenabledstacksmax_entriesc                 C   s   t  sdS tj| || dS )a  Enable/Disable the memory profiler on MTIA allocator

    Args:
        enabled (all or state, optional) selected device. Returns
            statistics for the current device, given by current_device(),
            if device is None (default).

        stacks ("python" or "cpp", optional). Select the stack trace to record.

        max_entries (int, optional). Maximum number of entries to record.
    N)r   r   r   _mtia_recordMemoryHistory)rN   rO   rP   r   r   r   record_memory_history   s   rR   c                   C   r   )z4Return a dictionary of MTIA memory allocator history)r   r   _mtia_memorySnapshotr   r   r   r   snapshot   rA   rT   c                 C   rD   )a  Return capability of a given device as a tuple of (major version, minor version).

    Args:
        device (torch.device or int, optional) selected device. Returns
            statistics for the current device, given by current_device(),
            if device is None (default).
    TrE   )r   r   _mtia_getDeviceCapabilityr   r>   r   r   r   get_device_capability   s   rV   c                   C   r   )zEmpty the MTIA device cache.)r   r   _mtia_emptyCacher   r   r   r   empty_cache   r9   rX   streamc                 C   s   | du rdS t j|  dS )a  Set the current stream.This is a wrapper API to set the stream.
        Usage of this function is discouraged in favor of the ``stream``
        context manager.

    Args:
        stream (Stream): selected stream. This function is a no-op
            if this argument is ``None``.
    N)r   r   _mtia_setCurrentStreamrY   r   r   r   
set_stream   s   	r\   c                 C   s$   t | } | dkrtj|  dS dS )zSet the current device.

    Args:
        device (torch.device or int): selected device. This function is a no-op
            if this argument is negative.
    r   N)r   r   r   %_accelerator_hooks_set_current_devicer>   r   r   r   
set_device   s   r^   c                   @   s<   e Zd ZdZd efddZdd Zdededefd	d
ZdS )r   zContext-manager that changes the selected device.

    Args:
        device (torch.device or int): device index to select. It's a no-op if
            this argument is a negative integer or ``None``.
    c                 C   s   t |dd| _d| _d S )NTrE   )r   idxprev_idx)selfr   r   r   r   __init__   s   
zdevice.__init__c                 C   s   t j| j| _d S r   )r   r   (_accelerator_hooks_maybe_exchange_devicer`   ra   )rb   r   r   r   	__enter__   s   zdevice.__enter__typevalue	tracebackc                 C   s   t j| j| _dS )NF)r   r   rd   ra   r`   )rb   rf   rg   rh   r   r   r   __exit__   s   zdevice.__exit__N)r5   r6   r7   __doc__r   rc   re   ri   r   r   r   r   r      s
    c                   @   sN   e Zd ZU dZed ed< ded fddZdd Zd	ed
edefddZ	dS )StreamContexta  Context-manager that selects a given stream.

    All MTIA kernels queued within its context will be enqueued on a selected
    stream.

    Args:
        Stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device.
    torch.mtia.Stream
cur_streamrY   c                 C   st   d | _ || _td d| _tj s| jd u rd| _tj s d ntjd | _	tj s1d | _
d S tjd | _
d S )NTr_   )rm   rY   r   r`   r   jitis_scriptingr<   rK   src_prev_streamdst_prev_stream)rb   rY   r   r   r   rc      s   


zStreamContext.__init__c                 C   s   | j }|d u s| jdkrd S tjd | _| jj|jkr9t|j tj|j| _W d    n1 s4w   Y  tj| d S Nr_   )	rY   r`   r   r<   rH   rp   r   rq   r\   )rb   rm   r   r   r   re     s   zStreamContext.__enter__rf   rg   rh   c                 C   sJ   | j }|d u s| jdkrd S | jj|jkrtj| j tj| j d S rr   )rY   r`   rp   r   r   r<   r\   rq   )rb   rf   rg   rh   rm   r   r   r   ri     s   zStreamContext.__exit__N)
r5   r6   r7   rj   r   __annotations__rc   re   r   ri   r   r   r   r   rk      s   
 rk   rl   c                 C   s   t | S )a,  Wrap around the Context-manager StreamContext that selects a given stream.

    Arguments:
        stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: In eager mode stream is of type Stream class while in JIT it doesn't support torch.mtia.stream
    )rk   r[   r   r   r   rY   ,  s   r<   c                 C   s$   t jdtdd tjdgtj| dS )zReturns the random number generator state as a ByteTensor.

    Args:
        device (torch.device or int, optional): The device to return the RNG state of.
            Default: ``'mtia'`` (i.e., ``torch.device('mtia')``, the current mtia device).
    z.get_rng_state is not implemented in torch.mtia   
stacklevelr   )dtyper   )warningswarnUserWarningr   zerosuint8r>   r   r   r   get_rng_state7  s   r}   	new_statec                 C   s   t jdtdd dS )a  Sets the random number generator state.

    Args:
        new_state (torch.ByteTensor): The desired state
        device (torch.device or int, optional): The device to set the RNG state.
            Default: ``'mtia'`` (i.e., ``torch.device('mtia')``, the current mtia device).
    z.set_rng_state is not implemented in torch.mtiart   ru   N)rx   ry   rz   )r~   r   r   r   r   set_rng_stateF  s
   

r   )*)r   r;   r   r?   r:   rC   rH   rK   memory_statsmax_memory_allocatedreset_peak_memory_statsrV   rR   rT   rX   r^   r\   rY   r   r   r}   )r   Nr   )rL   rM   r   )r<   )>rj   	threadingrx   typingr   r   r   r   r   r   _devicer   torch._utilsr   r	   r
   torch.typesr   _utilsr   r-   int	_device_tEventStreamr   r   listtuplers   localr#   Lockr$   r*   r   r   boolr   r   r,   r/   r&   r;   r?   r:   rC   rH   rK   rR   dictrT   rV   rX   r\   r^   rk   rY   r}   r   memory__all__r   r   r   r   <module>   sz   
3
 
: 
