o
    Wñhcš  ã                   @   sü   d dl Z d dlZd dlZd dlZd dlmZmZ d dlmZ d dl	m
Z
mZmZ d dlZd dlZd dlZd dlmZ d dlmZ d dlmZmZmZmZmZmZmZmZ d dlmZm Z m!Z!m"Z"m#Z# d dl$m%Z%m&Z& d	d
„ Z'ddd„Z(G dd„ dej)ƒZ*dS )é    N)ÚOrderedDictÚ
namedtuple)ÚPath)ÚListÚOptionalÚUnion)ÚImage)ÚARM64Ú	IS_JETSONÚIS_RASPBERRYPIÚLINUXÚLOGGERÚPYTHON_VERSIONÚROOTÚ	yaml_load)Úcheck_requirementsÚcheck_suffixÚcheck_versionÚ
check_yamlÚis_rockchip)Úattempt_download_assetÚis_urlc              
      sÄ   t | tƒrtt| ƒƒ} t | tƒr`dd„ |  ¡ D ƒ} t| ƒ}t|  ¡ ƒ|kr?t|› d|d › dt	|  ¡ ƒ› dt|  ¡ ƒ› dƒ‚t | d t
ƒr`| d  d	¡r`ttd
 ƒd ‰ ‡ fdd„|  ¡ D ƒ} | S )z7Check class names and convert to dict format if needed.c                 S   s   i | ]\}}t |ƒt|ƒ“qS © )ÚintÚstr©Ú.0ÚkÚvr   r   úN/var/www/vscode/kcb/lib/python3.10/site-packages/ultralytics/nn/autobackend.pyÚ
<dictcomp>   s    z%check_class_names.<locals>.<dictcomp>z(-class dataset requires class indices 0-é   z%, but you have invalid class indices ú-z defined in your dataset YAML.r   Ún0zcfg/datasets/ImageNet.yamlÚmapc                    s   i | ]	\}}|ˆ | “qS r   r   r   ©Ú	names_mapr   r   r    %   s    )Ú
isinstanceÚlistÚdictÚ	enumerateÚitemsÚlenÚmaxÚkeysÚKeyErrorÚminr   Ú
startswithr   r   )ÚnamesÚnr   r%   r   Úcheck_class_names   s"   


ÿ
ÿÿr4   c                 C   s<   | rz	t t| ƒƒd W S  ty   Y nw dd„ tdƒD ƒS )zSApplies default class names to an input YAML file or returns numerical class names.r2   c                 S   ó   i | ]}|d |› “qS ©Úclassr   ©r   Úir   r   r   r    0   ó    z'default_class_names.<locals>.<dictcomp>éç  )r   r   Ú	ExceptionÚrange)Údatar   r   r   Údefault_class_names)   s   ÿr?   c                       s¬   e Zd ZdZe ¡ de d¡ddddddfdeee	e ej
jf d	ejd
edeeeef  dedededef‡ fdd„ƒZddd„Zdd„ Zddd„Zeddd„ƒZ‡  ZS )ÚAutoBackenda¤  
    Handles dynamic backend selection for running inference using Ultralytics YOLO models.

    The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
    range of formats, each with specific naming conventions as outlined below:

        Supported Formats and Naming Conventions:
            | Format                | File Suffix       |
            | --------------------- | ----------------- |
            | PyTorch               | *.pt              |
            | TorchScript           | *.torchscript     |
            | ONNX Runtime          | *.onnx            |
            | ONNX OpenCV DNN       | *.onnx (dnn=True) |
            | OpenVINO              | *openvino_model/  |
            | CoreML                | *.mlpackage       |
            | TensorRT              | *.engine          |
            | TensorFlow SavedModel | *_saved_model/    |
            | TensorFlow GraphDef   | *.pb              |
            | TensorFlow Lite       | *.tflite          |
            | TensorFlow Edge TPU   | *_edgetpu.tflite  |
            | PaddlePaddle          | *_paddle_model/   |
            | MNN                   | *.mnn             |
            | NCNN                  | *_ncnn_model/     |
            | IMX                   | *_imx_model/      |
            | RKNN                  | *_rknn_model/     |

    Attributes:
        model (torch.nn.Module): The loaded YOLO model.
        device (torch.device): The device (CPU or GPU) on which the model is loaded.
        task (str): The type of task the model performs (detect, segment, classify, pose).
        names (dict): A dictionary of class names that the model can detect.
        stride (int): The model stride, typically 32 for YOLO models.
        fp16 (bool): Whether the model uses half-precision (FP16) inference.

    Methods:
        forward: Run inference on an input image.
        from_numpy: Convert numpy array to tensor.
        warmup: Warm up the model with a dummy input.
        _model_type: Determine the model type from file path.

    Examples:
        >>> model = AutoBackend(weights="yolov8n.pt", device="cuda")
        >>> results = model(img)
    z
yolo11n.ptÚcpuFNr!   TÚweightsÚdeviceÚdnnr>   Úfp16ÚbatchÚfuseÚverbosec	           q         s  t ƒ  ¡  tt|tƒr|d n|ƒ}	t|tjjƒ}
|  |	¡\}}}}}}}}}}}}}}}}}||p=|p=|p=|p=|p=|
p=|M }|pJ|pJ|pJ|pJ|pJ|}d\}}d\}} d\}!}"}#t|tj	ƒogtj
 ¡ og|jdk}$|$r{t|
|||||gƒs{t 	d¡}d}$|s…|s…|
s…t|	ƒ}	|
rÊ| |¡}!|r”|!j|d}!t|!dƒrœ|!j}%tt|!j ¡ ƒd	ƒ}t|!d
ƒr¯|!jjn|!j}&|r¸|! ¡ n|! ¡  |!j dd¡}|!| _d}n|rddlm}' |'t|tƒrÛ|n|	|d|d}!t|!dƒrê|!j}%tt|!j ¡ ƒd	ƒ}t|!d
ƒrý|!jjn|!j}&|r|! ¡ n|! ¡  |!j dd¡}|!| _nÄ|rQddl}(t  !d|	› d¡ ddi})tj"j#|	|)|d}!|r;|! ¡ n|! ¡  |)d rOt$j%|)d dd„ d}"nŠ|rit  !d|	› d¡ t&dƒ t'j( )|	¡}*nr|so|rkt  !d|	› d¡ t&d|$rdndfƒ t*s‰t+rt&dƒ ddl,}+d g},|$r±d!|+ -¡ v r¥|, .dd!¡ nt  /d"¡ t 	d¡}d}$t  !d#|,d › ¡ |rÆ|+j0|	|,d$}-n4t&g d%¢ƒ t1t2|	ƒ 3d&¡ƒ}	t  !d|	› d'¡ ddl4}.dd(l5m6}/ |. 7¡ }0d|0_8|+j0|	|0d gd$}-d)}#d*d+„ |- 9¡ D ƒ}1|- :¡ j;}"t|- 9¡ d j<d tƒ} d,|- =¡ d jv }| si|- >¡ }2g }3|- 9¡ D ]>}4d,|4jv }5tj?|4j<|5r;tj@ntjAd- |¡}6|2jB|4jC|j|$rO|jDnd|5rVtEj@ntEjAtF|6j<ƒ|6 G¡ d. |3 H|6¡ q*np|rt  !d|	› d/¡ t&d0ƒ ddlI}7|7 J¡ }8d1}9t|tƒr°| Kd2¡r°| Ld3¡d4  M¡ }9t 	d¡}|9|8jNvr°t  /d5|9› d6¡ d1}9t2|	ƒ}	|	 O¡ sÀt1|	 3d7¡ƒ}	|8jPt|	ƒ|	 Qd8¡d9}:|: R¡ d  S¡ j?râ|: R¡ d  T|7 Ud:¡¡ |d4kréd;nd<};t  !d=|;› d>|› d?¡ |8jV|:|9d@|;idA}<|< W¡  X¡ }=|	jYdB }"nÍ|rØt  !d|	› dC¡ t+r'tZt[dDƒr't&dƒ zddl\}>W n t]yB   t^r<t&dEƒ ddl\}>Y nw tZ|>j_dFddG tZ|>j_dHdIdJ |jdkr^t 	dK¡}t`dLdMƒ}?|> a|>jajb¡}@tc|	dNƒ\}A|> d|@¡F}Bztje|A fdO¡dPdQ}Ct$ %|A f|C¡ gdR¡¡}"W n thyœ   |A id¡ Y nw |" dSd¡}D|Ddur­t|Dƒ|B_j|B k|A f¡ ¡}!W d  ƒ n	1 s¿w   Y  W d  ƒ n	1 sÏw   Y  z|! l¡ }EW n tmyó }F zt  ndT|>j_› dU¡ |F‚d}F~Fww toƒ }3g }1d}d} t|!dVƒ }G|Grtp|!jqƒntp|!jrƒ}H|HD ]±}I|Gr`|! s|I¡}J|> t|! u|J¡¡}K|! v|J¡|>jwjxk}L|LrSdWtF|! y|J¡ƒv rJd} |E z|JtF|! {|Jd¡d4 ƒ¡ |KtEj@krRd}n|1 H|J¡ tF|E y|J¡ƒ}MnG|! ||I¡}J|> t|! }|I¡¡}K|! ~|I¡}L|! ~|I¡r›dWtF|! |I¡ƒv r’d} |E €|ItF|! d|I¡d4 ƒ¡ |KtEj@kršd}n|1 H|J¡ tF|E |I¡ƒ}Mt ‚tEj?|M|Kd-¡ |¡}N|?|J|K|M|Nt|N G¡ ƒƒ|3|J< qtodXdY„ |3 ƒ¡ D ƒƒ}O|3dZ j<d }Pn|rõt  !d|	› d[¡ ddl„}Q|Qj… †|	¡}!t‡|!jˆƒ}"næ|rt  !d|	› d\¡ ddl‰‰d}R|RrˆjŠj… ‹|	¡nˆjŒ #|	¡}!t2|	ƒdB }"n¼|rˆt  !d|	› d]¡ ddl‰‰dd^lmŽ}S ‡fd_d`„}Tˆ ¡  ¡ }Utc|	dNƒ}A|U ‘|A f¡ ¡ W d  ƒ n	1 sYw   Y  |T|Uda|S|Uƒdb}Vzt1t2|	ƒ ’¡ jY “t2|	ƒj”› dc¡ƒ}"W n^ t•y‡   Y nTw |sŽ|rRz
dddl–m—}Wm˜}X W n t]y°   ddl‰‰ˆj™j—ˆj™jšj˜}W}XY nw |rît|ƒ Kde¡rÂ|dd… ndf}t  !d|	› dg|d4d… › dh¡ didjdkdlœt› œ¡  }Y|W|	|X|Ydm|idngdo}Zd}nt  !d|	› dp¡ |W|	dq}Z|Z ¡  |Z ž¡ }[|Z Ÿ¡ }\z8t  ¡|	dr¡&}]|] ¢¡ d }J|] f|J¡ gdR¡}^|Jdskr)t$ %|^¡}"nt£ ¤|^¡}"W d  ƒ n	1 s9w   Y  W nš t j¥t¦t§t$j¨fyQ   Y nŠw |rYt©dtƒ‚|rât  !d|	› du¡ t&|$rkdvndwƒ ddlªm«}_ t2|	ƒ}	dx\}`}a|	 ¬¡ r’t1|	 “dy¡dƒ}`t1|	 “dz¡dƒ}an|	j­d{krŸ|	 ®d|¡}`|	}a|`r¯|ar¯|` O¡ r¯|a O¡ s·t¯d}|	› d~ƒ‚|_ °t|`ƒt|aƒ¡}b|$rË|bj±ddd€ |_ ²|b¡}c|c ³|c ´¡ d ¡}d|c µ¡ }1|	dB }"nù|r(t  !d|	› d¡ t&d‚ƒ ddl¶}eddl·‰ dƒd„|e ¸¡ d4 d… d†œ}bˆ j ¹|bf¡}fˆ jjº|	g g |fdd‡}*‡ fdˆd‰„}gt$ %|* »¡ dŠ ¡}"n³|rpt  !d|	› d‹¡ t&t¼r:dŒndƒ ddl½}h|h ¾¡ }*|$|*j¿_Àt2|	ƒ}	|	 O¡ sYt1|	 3dŽ¡ƒ}	|* Át|	ƒ¡ |* ‹t|	 Qd8¡ƒ¡ |	jYdB }"nk|r…t&dƒ ddlÂmÃ}i |i|	ƒ}!|!jÄ}"nV|rÇtÅƒ stÆd‘ƒ‚t  !d|	› d’¡ t&d“ƒ dd”lÇmÈ}j t2|	ƒ}	|	 O¡ s³t1|	 “d•¡ƒ}	|jƒ }k|k Ét|	ƒ¡ |k Ê¡  |	jYdB }"ndd–lmË}l tÌd—|	› d˜|lƒ d™ › dšƒ‚t|"tt2fƒrît2|"ƒ Í¡ rîtÎ|"ƒ}"|"rSt|"t‡ƒrS|" ƒ¡ D ]#\}m}n|md›v rt|nƒ|"|m< qû|mdœv rt|ntƒrtÏ|nƒ|"|m< qû|"d }|"dž }#|"dŸ }|"d  }o|"d¡ }&|" d¡}%|" d¢i ¡ d£d¡}|" d¢i ¡ d¤| ¡} |" dd¡}n|se|se|
set  /d¥|› d¦¡ d¡tÐƒ vrotÑ|ƒ}&tÒ|&ƒ}&|r|! Ó¡ D ]}pd|p_Ôqz| jÕ ÖtÐƒ ¡ dS )§a»  
        Initialize the AutoBackend for inference.

        Args:
            weights (str | List[str] | torch.nn.Module): Path to the model weights file or a module instance.
            device (torch.device): Device to run the model on.
            dnn (bool): Use OpenCV DNN module for ONNX inference.
            data (str | Path | optional): Path to the additional data.yaml file containing class names.
            fp16 (bool): Enable half-precision inference. Supported only on specific backends.
            batch (int): Batch-size to assume for inference.
            fuse (bool): Fuse Conv2D + BatchNorm layers for optimization.
            verbose (bool): Enable verbose logging.
        r   )é    é   )FF)NNNrA   F)rH   Ú	kpt_shaperI   ÚmoduleÚchannelsrJ   T)Úattempt_load_weights)rC   ÚinplacerG   NzLoading z for TorchScript inference...z
config.txtÚ )Ú_extra_filesÚmap_locationc                 S   s   t |  ¡ ƒS ©N)r)   r+   ©Úxr   r   r   Ú<lambda>Æ   s    z&AutoBackend.__init__.<locals>.<lambda>)Úobject_hookz! for ONNX OpenCV DNN inference...zopencv-python>=4.5.4z for ONNX Runtime inference...Úonnxzonnxruntime-gpuÚonnxruntimeznumpy==1.23.5ÚCPUExecutionProviderÚCUDAExecutionProviderz4Failed to start ONNX Runtime with CUDA. Using CPU...zUsing ONNX Runtime )Ú	providers)z model-compression-toolkit>=2.3.0z sony-custom-layers[torch]>=0.3.0zonnxruntime-extensionsz*.onnxz for ONNX IMX inference...)Únms_ortÚdetectc                 S   s   g | ]}|j ‘qS r   ©Úname©r   rU   r   r   r   Ú
<listcomp>ð   ó    z(AutoBackend.__init__.<locals>.<listcomp>Úfloat16)Údtype©r`   Údevice_typeÚ	device_idÚelement_typeÚshapeÚ
buffer_ptrz for OpenVINO inference...zopenvino>=2024.0.0ÚAUTOÚintelú:r!   zOpenVINO device 'z&' not available. Using 'AUTO' instead.z*.xmlz.bin)ÚmodelrB   ÚNCHWÚCUMULATIVE_THROUGHPUTÚLATENCYzUsing OpenVINO z mode for batch=z inference...ÚPERFORMANCE_HINT)Údevice_nameÚconfigzmetadata.yamlz for TensorRT inference...z<=3.8.0ztensorrt>7.0.0,!=10.1.0z>=7.0.0)Úhardz!=10.1.0z5https://github.com/ultralytics/ultralytics/pull/14239)Úmsgzcuda:0ÚBinding)r`   re   rj   r>   ÚptrÚrbé   Úlittle)Ú	byteorderzutf-8Údlaz6TensorRT model exported with a different version than Ú
Únum_bindingséÿÿÿÿc                 s   s    | ]
\}}||j fV  qd S rS   )ry   )r   r3   Údr   r   r   Ú	<genexpr>n  s   € z'AutoBackend.__init__.<locals>.<genexpr>Úimagesz for CoreML inference...z' for TensorFlow SavedModel inference...z% for TensorFlow GraphDef inference...)Ú
gd_outputsc                    sB   ˆj j ‡ ‡fdd„g ¡}|jj}| ˆj ||¡ˆj ||¡¡S )z"Wrap frozen graphs for deployment.c                      s   ˆj jjˆ ddS )NrP   r_   )ÚcompatÚv1Úimport_graph_defr   )ÚgdÚtfr   r   rV   ‹  rc   zAAutoBackend.__init__.<locals>.wrap_frozen_graph.<locals>.<lambda>)r†   r‡   Úwrap_functionÚgraphÚas_graph_elementÚpruneÚnestÚmap_structure)r‰   ÚinputsÚoutputsrU   Úge)rŠ   )r‰   r   Úwrap_frozen_graph‰  s    z/AutoBackend.__init__.<locals>.wrap_frozen_graphzx:0)r‘   r’   z_saved_model*/metadata.yaml)ÚInterpreterÚload_delegateÚtpuz:0z on device z* for TensorFlow Lite Edge TPU inference...zlibedgetpu.so.1zlibedgetpu.1.dylibzedgetpu.dll)ÚLinuxÚDarwinÚWindowsrC   )Úoptions)Ú
model_pathÚexperimental_delegatesz! for TensorFlow Lite inference...)rœ   Úrzmetadata.jsonz2YOLOv8 TF.js inference is not currently supported.z for PaddlePaddle inference...zpaddlepaddle-gpuzpaddlepaddle>=3.0.0)NNz*.jsonz*.pdiparamsz
.pdiparamsz
model.jsonzPaddle model not found in z/. Both .json and .pdiparams files are required.i   )Úmemory_pool_init_size_mbrh   z for MNN inference...ÚMNNÚlowÚCPUé   )Ú	precisionÚbackendÚ	numThread)Úruntime_managerÚ	rearrangec                    s   ˆ j  |  ¡ | j¡S rS   )ÚexprÚconstÚdata_ptrrj   rT   )r    r   r   Útorch_to_mnnç  s   z*AutoBackend.__init__.<locals>.torch_to_mnnÚbizCodez for NCNN inference...z'git+https://github.com/Tencent/ncnn.gitÚncnnz*.paramztritonclient[all])ÚTritonRemoteModelz5RKNN inference is only supported on Rockchip devices.z for RKNN inference...zrknn-toolkit-lite2)ÚRKNNLitez*.rknn©Úexport_formatszmodel='z9' is not a supported model format. Ultralytics supports: ÚFormatz9
See https://docs.ultralytics.com/modes/predict for help.>   rF   ÚstriderM   >   ÚargsÚimgszr2   rK   r´   ÚtaskrF   r¶   r2   rµ   ÚnmsÚdynamiczMetadata not found for 'model=ú')×ÚsuperÚ__init__r   r'   r(   ÚtorchÚnnÚModuleÚ_model_typerC   ÚcudaÚis_availableÚtypeÚanyr   ÚtorG   ÚhasattrrK   r-   r   r´   rL   r2   ÚhalfÚfloatÚyamlÚgetro   Úultralytics.nn.tasksrN   Útorchvisionr   ÚinfoÚjitÚloadÚjsonÚloadsr   Úcv2rD   ÚreadNetFromONNXr   r
   rY   Úget_available_providersÚinsertÚwarningÚInferenceSessionÚnextr   ÚglobÚmct_quantizersÚsony_custom_layers.pytorch.nmsr]   Úget_ort_session_optionsÚenable_mem_reuseÚget_outputsÚget_modelmetaÚcustom_metadata_maprj   Ú
get_inputsÚ
io_bindingÚemptyrd   Úfloat32Úbind_outputr`   ÚindexÚnpÚtupler«   ÚappendÚopenvinoÚCorer1   ÚsplitÚupperÚavailable_devicesÚis_fileÚ
read_modelÚwith_suffixÚget_parametersÚ
get_layoutÚ
set_layoutÚLayoutÚcompile_modelÚinputÚget_any_nameÚparentr   r   ÚtensorrtÚImportErrorr   Ú__version__r   ÚLoggerÚINFOÚopenÚRuntimeÚ
from_bytesÚreadÚdecodeÚUnicodeDecodeErrorÚseekÚDLA_coreÚdeserialize_cuda_engineÚcreate_execution_contextr<   Úerrorr   r=   Únum_io_tensorsr€   Úget_tensor_nameÚnptypeÚget_tensor_dtypeÚget_tensor_modeÚTensorIOModeÚINPUTÚget_tensor_shapeÚset_input_shapeÚget_tensor_profile_shapeÚget_binding_nameÚget_binding_dtypeÚbinding_is_inputÚget_binding_shapeÚset_binding_shapeÚget_profile_shapeÚ
from_numpyr+   ÚcoremltoolsÚmodelsÚMLModelr)   Úuser_defined_metadataÚ
tensorflowÚkerasÚ
load_modelÚsaved_modelÚultralytics.engine.exporterr…   ÚGraphÚas_graph_defÚParseFromStringÚresolveÚrglobÚstemÚStopIterationÚtflite_runtime.interpreterr•   r–   ÚliteÚexperimentalÚplatformÚsystemÚallocate_tensorsÚget_input_detailsÚget_output_detailsÚzipfileÚZipFileÚnamelistÚastÚliteral_evalÚ
BadZipFileÚSyntaxErrorÚ
ValueErrorÚJSONDecodeErrorÚNotImplementedErrorÚpaddle.inferenceÚ	inferenceÚis_dirÚsuffixÚ	with_nameÚFileNotFoundErrorÚConfigÚenable_use_gpuÚcreate_predictorÚget_input_handleÚget_input_namesÚget_output_namesÚosr    Ú	cpu_countÚcreate_runtime_managerÚload_module_from_fileÚget_infor	   r®   ÚNetÚoptÚuse_vulkan_computeÚ
load_paramÚultralytics.utils.tritonr¯   Úmetadatar   ÚOSErrorÚrknnlite.apir°   Ú	load_rknnÚinit_runtimer²   Ú	TypeErrorÚexistsr   ÚevalÚlocalsr?   r4   Ú
parametersÚrequires_gradÚ__dict__Úupdate)qÚselfrB   rC   rD   r>   rE   rF   rG   rH   ÚwÚ	nn_moduleÚptrÎ   rX   ÚxmlÚengineÚcoremlr"  ÚpbÚtfliteÚedgetpuÚtfjsÚpaddleÚmnnr®   ÚimxÚrknnÚtritonÚnhwcr´   ÚchÚend2endr¹   ro   rS  r·   rÁ   rK   r2   rN   rÌ   Úextra_filesÚnetrY   r\   ÚsessionÚmctqr]   Úsession_optionsÚoutput_namesÚioÚbindingsÚoutputÚout_fp16Úy_tensorÚovÚcorert   Úov_modelÚinference_modeÚov_compiled_modelÚ
input_nameÚtrtrx   ÚloggerÚfÚruntimeÚmeta_lenr~   ÚcontextÚeÚis_trt10Únumr9   r`   re   Úis_inputrj   ÚimÚbinding_addrsÚ
batch_sizeÚctr   r…   r”   r‰   Úfrozen_funcr•   r–   ÚdelegateÚinterpreterÚinput_detailsÚoutput_detailsÚzfÚcontentsÚpdiÚ
model_fileÚparams_fileru   Ú	predictorÚinput_handlerI  Úrtr¬   Úpyncnnr¯   r°   Ú
rknn_modelr²   r   r   r¶   Úp©Ú	__class__)r    rŠ   r   r¼   a   sò  
î 
 


€ÿ
€
€
€

ÿ

$ú€

ýý

ÿ

÷€ €þ
€


€
  ÿ*ÿý  
ÿþ


€ú€ÿ

 



ÿ
€

zAutoBackend.__init__c                    sB	  |j \}}}}	ˆjr|jtjkr| ¡ }ˆjr| dddd¡}ˆjs%ˆj	r4ˆj
|f|||dœ|¤Ž}
nˆjr>ˆ 
|¡}
nˆjrT| ¡  ¡ }ˆj |¡ ˆj ¡ }
núˆjsZˆjrÐˆjrt| ¡  ¡ }ˆj ˆjˆj ¡ d j|i¡}
n5ˆjs{| ¡ }ˆjjd|jj|jjdkrŒ|jjndˆjr“t jnt j!t"|j ƒ| #¡ d ˆj $ˆj¡ ˆj%}
ˆjrÎt j&|
d |
d d	d	…d	d	…d	f |
d d	d	…d	d	…d	f gd
d}
n~ˆj'r/| ¡  ¡ }ˆj(dv r$|j d }d	g| ‰‡fdd„}ˆj) *ˆj+¡}| ,|¡ t-|ƒD ]}|j.ˆj/|||d … i|d q | 0¡  t  &dd„ ˆD ƒ¡}
n*t1ˆ +|¡ 2¡ ƒ}
nˆj3rêˆjr¨|j ˆj%d j kr¨ˆj4rqˆj5 6d|j ¡ ˆj%d j7|j dˆj%d< ˆjD ]}ˆj%| j8 9t"ˆj5 :|¡ƒ¡ q]n7ˆj
 ;d¡}ˆj5 <||j ¡ ˆj%d j7|j dˆj%d< ˆjD ]}ˆj
 ;|¡}ˆj%| j8 9t"ˆj5 =|¡ƒ¡ qˆj%d j }|j |ksÈJ d|j › dˆjrÀdnd› d|› ƒ‚t>| #¡ ƒˆj?d< ˆj5 @t1ˆj? 2¡ ƒ¡ ‡fdd„tAˆjƒD ƒ}
ndˆjBr4|d  ¡  ¡ }tC D|d  Ed¡¡}ˆj
 Fd|i¡}
d|
v rtGd|	› dƒ‚t1|
 2¡ ƒ}
tH|
ƒdkr2tH|
d j ƒdkr2t1tI|
ƒƒ}
nˆjJrY| ¡  ¡  Et j!¡}ˆjK L|¡ ˆjM ¡  ‡fd d„ˆjD ƒ}
nõˆjNrrˆ O|¡}ˆj P|g¡}d!d„ |D ƒ}
nÜˆjQr³ˆjR S|d  ¡  ¡ ¡}ˆj T¡ !‰ ˆ  Uˆj V¡ d |¡ ‡ fd"d„tAˆj ¡ ƒD ƒ}
W d	  ƒ n	1 s¬w   Y  n›ˆjWrÄ| ¡  ¡ }ˆ 
|¡}
nŠˆjXré| ¡  ¡ d  Ed¡}tY|t1t"fƒrÝ|n|g}ˆjZj[|d#}
ne| ¡  ¡ }ˆj\rˆj]rþˆj
|d$d%nˆ 
|¡}
tY|
t1ƒs|
g}
nˆj^rˆj_ˆj` a|¡d&}
nùˆjbd }|d' t jct jdhv }|r@|d( \}}|| |  E|d' ¡}ˆje f|d) |¡ ˆje g¡  g }
ˆjhD ]Â}ˆje i|d) ¡}|rp|d( \}}| Et j!¡| | }|jjdkr|j d
 d*ks‚ˆjkrÏ|d	d	…d	d	…ddgf  |	9  < |d	d	…d	d	…ddgf  |9  < ˆjld+krÎ|d	d	…d	d	…d*d	d…f  |	9  < |d	d	…d	d	…d,d	d…f  |9  < n@|d	d	…ddgf  |	9  < |d	d	…ddgf  |9  < ˆjld+kr|d	d	…d-d	d…f  |	9  < |d	d	…d*d	d…f  |9  < |
 m|¡ qStH|
ƒdkrGtH|
d j ƒdkr-t1tI|
ƒƒ}
|
d j d
 d*kr=|
d g}
n
t  n|
d d.¡|
d< d/d„ |
D ƒ}
tY|
t1t"fƒrœtHˆjoƒd0kr…ˆjld1ksktH|
ƒdkr…|
d j d |
d j d  d }d2d3„ t-|ƒD ƒˆ_otH|
ƒdkr“ˆ p|
d ¡S ‡fd4d„|
D ƒS ˆ p|
¡S )5aG  
        Runs inference on the YOLOv8 MultiBackend model.

        Args:
            im (torch.Tensor): The image tensor to perform inference on.
            augment (bool): Whether to perform data augmentation during inference.
            visualize (bool): Whether to visualize the output predictions.
            embed (list | None): A list of feature vectors/embeddings to return.
            **kwargs (Any): Additional keyword arguments for model configuration.

        Returns:
            (torch.Tensor | List[torch.Tensor]): The raw output tensor(s) from the model.
        r   r£   rJ   r!   )ÚaugmentÚ	visualizeÚembedr„   rÁ   rf   Nr   )Úaxis>   Ú
THROUGHPUTrq   c                    s   | j ˆ |< dS )z8Places result in preallocated list using userdata index.N©Úresults)ÚrequestÚuserdatar©  r   r   Úcallback}  s   z%AutoBackend.forward.<locals>.callback)r‘   r¬  c                 S   s   g | ]
}t | ¡ ƒd  ‘qS )r   )r(   Úvalues)r   rž   r   r   r   rb   ˆ  ó    z'AutoBackend.forward.<locals>.<listcomp>)rj   zinput size ú ú>znot equal toz max model size c                    s   g | ]}ˆ j | j‘qS r   )rz  r>   ra   ©r`  r   r   rb   ¡  r:   éÿ   Úuint8ÚimageÚ
confidenceziUltralytics only supports inference of non-pipelined CoreML models exported with 'nms=False', but 'model=z6' has an NMS pipeline created by an 'nms=True' export.r{   c                    s   g | ]
}ˆ j  |¡ ¡ ‘qS r   )rœ  Úget_output_handleÚcopy_to_cpura   r²  r   r   rb   ¼  r¯  c                 S   s   g | ]}|  ¡ ‘qS r   )r  ra   r   r   r   rb   Â  ó    c                    s$   g | ]}t  ˆ  |¡d  ¡d ‘qS )r!   N)rç   ÚarrayÚextractra   )Úexr   r   rb   Ê  ó   $ )r‘   F)ÚtrainingrT   re   Úquantizationræ   é   Úposeé   é   )r   rJ   r!   r£   c                 S   s$   g | ]}t |tjƒr|n| ¡ ‘qS r   )r'   rç   ÚndarrayÚnumpyra   r   r   r   rb     r½  r;   Úsegmentc                 S   r5   r6   r   r8   r   r   r   r      r:   z'AutoBackend.forward.<locals>.<dictcomp>c                    s   g | ]}ˆ   |¡‘qS r   )r  ra   r²  r   r   rb     s    )qrj   rE   re   r½   rd   rÇ   rp  Úpermuterc  rb  ro   rÎ   rD   rA   rÅ  rt  ÚsetInputÚforwardrX   rm  r¹   ru  Úrunrx  rá   r`   rÁ   ry  Ú
bind_inputrC   rÃ   ræ   rç   rä   rè   r«   Úrun_with_iobindingrz  Úconcatenaterd  r  r~  ÚAsyncInferQueuer‚  Úset_callbackr=   Ústart_asyncrƒ  Úwait_allr(   r®  re  r‹  r‰  r  Ú_replacer>   Úresize_r  Úget_binding_indexr  r  r   r  Ú
execute_v2Úsortedrf  r   Ú	fromarrayÚastypeÚpredictrX  r,   Úreversedrk  r  Úcopy_from_cpurœ  rl  r¬   Ú	onForwardr®   rŸ  ÚMatÚcreate_extractorr÷   Úinput_namesro  rn  r'   r   r>  r"  r   rg  r’  rŠ   Úconstantr•  Úint8Úint16r”  Ú
set_tensorÚinvoker–  Ú
get_tensorÚndimrr  r·   ré   Ú	transposer2   r  )r`  rŽ  r¤  r¥  r¦  ÚkwargsÚbrq  Úhra  Úyr3   r­  Úasync_queuer9   r`   ÚsÚim_pilÚ	input_varÚ
output_varÚmat_inÚdetailsÚis_intÚscaleÚ
zero_pointr{  rU   Úncr   )r¼  rª  r`  r   rÉ  =  s*  "úD€


$
"ÿ
"4
ÿÿ	"€

ý€ €


""$$€* .
zAutoBackend.forwardc                 C   s"   t |tjƒrt |¡ | j¡S |S )z½
        Convert a numpy array to a tensor.

        Args:
            x (np.ndarray): The array to be converted.

        Returns:
            (torch.Tensor): The converted tensor
        )r'   rç   rÄ  r½   ÚtensorrÅ   rC   )r`  rU   r   r   r   r    s   "
zAutoBackend.from_numpy©r!   rJ   é€  rù  c                 C   s–   ddl }| j| j| j| j| j| j| j| jf}t	|ƒrE| j
jdks#| jrGtj|| jr,tjntj| j
dœŽ}t| jr:dndƒD ]}|  |¡ q=dS dS dS )zÕ
        Warm up the model by running one forward pass with a dummy input.

        Args:
            imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width)
        r   NrA   )re   rC   r£   r!   )rÌ   rc  rÎ   rX   re  r"  rg  ro  rb  rÄ   rC   rÃ   r½   rã   rE   rÇ   rÈ   r=   rÉ  )r`  r¶   rÌ   Úwarmup_typesrŽ  Ú_r   r   r   Úwarmup  s   $"ýzAutoBackend.warmupúpath/to/model.ptc                    sÄ   ddl m} |ƒ d }t| ƒst| tƒst| |ƒ t| ƒj‰ ‡ fdd„|D ƒ}|d  ˆ  d¡O  < |d  |d	  M  < t	|ƒrDd
}nddl
m} || ƒ}t|jƒo\t|jƒo\|jdv }||g S )av  
        Takes a path to a model file and returns the model type.

        Args:
            p (str): Path to the model file.

        Returns:
            (List[bool]): List of booleans indicating the model type.

        Examples:
            >>> model = AutoBackend(weights="path/to/model.onnx")
            >>> model_type = model._model_type()  # returns "onnx"
        r   r±   ÚSuffixc                    s   g | ]}|ˆ v ‘qS r   r   )r   rí  r_   r   r   rb   B  r¹  z+AutoBackend._model_type.<locals>.<listcomp>rÃ  z.mlmodelé   é	   F)Úurlsplit>   ÚgrpcÚhttp)r#  r²   r   r'   r   r   r   r`   ÚendswithrÄ   Úurllib.parser  ÚboolÚnetlocÚpathÚscheme)r¡  r²   ÚsfÚtypesro  r  Úurlr   r_   r   rÀ   -  s   



zAutoBackend._model_type)FFN)rø  )rý  )Ú__name__Ú
__module__Ú__qualname__Ú__doc__r½   Úno_gradrC   r   r   r   r¾   r¿   r  r   r   r   r¼   rÉ  r  rü  ÚstaticmethodrÀ   Ú__classcell__r   r   r¢  r   r@   3   sJ    -÷þýüûúùø	÷   
^ V
r@   rS   )+r6  rÐ   r.  r3  Úcollectionsr   r   Úpathlibr   Útypingr   r   r   rÒ   rÅ  rç   r½   Útorch.nnr¾   ÚPILr   Úultralytics.utilsr	   r
   r   r   r   r   r   r   Úultralytics.utils.checksr   r   r   r   r   Úultralytics.utils.downloadsr   r   r4   r?   r¿   r@   r   r   r   r   Ú<module>   s$   (

