"""
This module provides TVM backend integration for TorchDynamo.

Apache TVM is a deep learning compiler framework that can optimize and execute
models on various hardware backends. This module enables:

- Compilation of PyTorch models to TVM's computation graphs
- Multiple scheduling options:
  - Default scheduler
  - Auto-scheduler for automatic optimization
  - Meta-schedule for evolutionary search-based tuning
- Hardware-specific optimizations:
  - CUDA GPU support
  - CPU support with LLVM targeting and architecture-specific tuning
  - Automatic detection of CPU capabilities (AVX2, AVX512)
- Tensor conversion utilities between PyTorch and TVM formats
- Configurable optimization levels and tuning trials

The backend can be used with torch.compile():
    model = torch.compile(model, backend="tvm")
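
Scheduler, trial budget, and optimization level can also be selected through
the backend options (a sketch, assuming torch.compile() forwards its options
dict to this backend as keyword arguments; the TVM_SCHEDULER environment
variable is an alternative way to pick the scheduler):
    model = torch.compile(
        model,
        backend="tvm",
        options={"scheduler": "meta_schedule", "trials": 20000, "opt_level": 3},
    )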
    N)MappingProxyType)Optional   )device_from_inputsfake_tensor_unsupported)register_backend N     )	schedulertrials	opt_level)optionsr   c                   sf  dd l ddl m} ddlm} tj| |}t|}dd t|D }| | }t	|dkr6t
d | jS |j||\}	}
|jdkrP|j}j }nd}jt }|dd }|d u rmtjd	d }|d
d}|dd}|dkrddl m} t }tj|s||	d |
|\}}t	|dkr|||}tj|s|dksJ |j ||!|gdd}z|"| W n t#y   tj|rt$|  w |%|* j&j'|ddid |j(|	||
d}W d    n1 sw   Y  W d    n	1 sw   Y  n|dkriddl m)} t* ?}|jdkr8jt  d|j+j,dd }|dks?J |j-j.|	|||d|
d|d}|j-j/||	||
|d}W d    n	1 scw   Y  n-|d ksq|sj&j'|d! |j(|	||
d}W d    n	1 sw   Y  nt0d"|1|d  | d#d$ fd%d& fd'd(}|S ))Nr   )relay)graph_executorc                 S   s    g | ]\}}d | |j fqS )inp_)shape).0idxi r   N/var/www/vscode/kcb/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py
<listcomp>;   s     ztvm.<locals>.<listcomp>z0Explicitly fall back to eager due to zero outputcudar
   TVM_SCHEDULERr   r   r   r	   auto_scheduler)r   maini  )num_measure_trialsmeasure_callbacksearly_stoppingz relay.backend.use_auto_schedulerT)r   config)targetparamsmeta_schedule)r"   z --num-cores F)logical@   evolutionary)modr    work_dirmax_trials_globalnum_trials_per_iterr!   strategyr   )databaser&   r    r!   r   default)r   zThis tuning option is invalid/not implemented for torchdynamo's TVM-related backend. There are three available options: default, auto_scheduler and meta_schedule.c                 S   s*   | j dkrt|  S tjj|  S )z8A helper function to transfer a NDArray to torch.tensor.bool)dtypetorch
from_numpynumpyutilsdlpackfrom_dlpack	to_dlpack)	nd_tensorr   r   r   to_torch_tensor   s   
ztvm.<locals>.to_torch_tensorc                    s,   | j tjkr j|   S  j| S )z8A helper function to transfer a torch.tensor to NDArray.)r.   r/   r-   ndarraycpur1   r4   )torch_tensor)tvmr   r   to_tvm_tensor   s   ztvm.<locals>.to_tvm_tensorc                     s   dd | D }   \}}dd | D }t|dD ])\}}| dkrD|jr,| }d| }||vr<td| q || q 	   fddt
  D S )	Nc                 S   s   g | ]}|  qS r   )
contiguous)r   ar   r   r   r          z)tvm.<locals>.exec_tvm.<locals>.<listcomp>c                 S   s   h | ]\}}|qS r   r   )r   name_r   r   r   	<setcomp>   r@   z(tvm.<locals>.exec_tvm.<locals>.<setcomp>r   r   z6input %s skipped as not found in tvm's runtime libraryc                    s   g | ]	}  |qS r   )
get_output)r   r   )mr7   r   r   r      s    )get_input_infoitems	enumeratedimrequires_graddetachlogwarning	set_inputrunrangeget_num_outputs)i_argsargs
shape_inforB   active_inputsr   arginp_name)rE   r7   r=   r   r   exec_tvm   s*   
ztvm.<locals>.exec_tvm)2r<   r   tvm.contribr   r/   jittracer   rH   lenrL   rM   forwardfrontendfrom_pytorchtyper   indexr    r:   Targetllvm_targetgetosenvironr   tempfileNamedTemporaryFilepathexistsextract_tasksTaskSchedulerTuningOptionsRecordToFiletune	ExceptionunlinkApplyHistoryBest	transformPassContextbuildr"   TemporaryDirectoryr2   	cpu_countrelay_integration
tune_relaycompile_relayNotImplementedErrorGraphModule)gmexample_inputsr   r   r   jit_moddevice
shape_listexample_outputsr&   r!   devr    r
   r   r   r   log_filetaskstask_weightstunertune_optionlibmsr'   r+   rX   r   )rE   r7   r=   r<   r   r<   +   s   










	r<   r"   )r
   r   c                   C   s&   zt d W dS  ty   Y dS w )Nr<   TF)	importlibimport_moduleImportErrorr   r   r   r   has_tvm   s   
r   c                  C   s2   t jdkrtd } d| v rdS d| v rdS dS )Nlinuxz/proc/cpuinfoavx512zllvm -mcpu=skylake-avx512avx2zllvm -mcpu=core-avx2llvm)sysplatformopenread)cpuinfor   r   r   rc      s   
rc   )__doc__	functoolsr   loggingre   r   rg   typesr   typingr   r/   commonr   r   registryr   	getLogger__name__rL   r<   partialtvm_meta_scheduletvm_auto_schedulerr   	lru_cacherc   r   r   r   r   <module>   s6   

 