"""Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""

import os
from copy import deepcopy

import numpy as np
import torch

from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
from ultralytics.utils.torch_utils import autocast, profile


def check_train_batch_size(model, imgsz=640, amp=True, batch=-1, max_num_obj=1):
    """
    Compute optimal YOLO training batch size using the autobatch() function.

    Args:
        model (torch.nn.Module): YOLO model to check batch size for.
        imgsz (int, optional): Image size used for training.
        amp (bool, optional): Use automatic mixed precision if True.
        batch (float, optional): Fraction of GPU memory to use. If -1, use default.
        max_num_obj (int, optional): The maximum number of objects from dataset.

    Returns:
        (int): Optimal batch size computed using the autobatch() function.

    Notes:
        If 0.0 < batch < 1.0, it's used as the fraction of GPU memory to use.
        Otherwise, a default fraction of 0.6 is used.
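
    Examples:
        Minimal illustrative sketch (not part of the original docstring); assumes a CUDA device and a
        locally available "yolo11n.pt" checkpoint.
        >>> from ultralytics import YOLO
        >>> model = YOLO("yolo11n.pt").model.cuda()
        >>> optimal_batch = check_train_batch_size(model, imgsz=640, amp=True, batch=0.8)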
    """
    with autocast(enabled=amp):
        return autobatch(
            deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6, max_num_obj=max_num_obj
        )


def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch, max_num_obj=1):
    """
    Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.

    Args:
        model (torch.nn.Module): YOLO model to compute batch size for.
        imgsz (int, optional): The image size used as input for the YOLO model.
        fraction (float, optional): The fraction of available CUDA memory to use.
        batch_size (int, optional): The default batch size to use if an error is detected.
        max_num_obj (int, optional): The maximum number of objects from dataset.

    Returns:
        (int): The optimal batch size.
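
    Examples:
        Illustrative sketch only (not from the original docstring); assumes a CUDA-capable machine and a
        model already placed on the GPU.
        >>> from ultralytics import YOLO
        >>> model = YOLO("yolo11n.pt").model.cuda().train()
        >>> optimal_batch = autobatch(model, imgsz=640, fraction=0.6)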
    zAutoBatch: z'Computing optimal batch size for imgsz=z at d   z% CUDA memory utilization.>   cpumpsz4intended for CUDA devices, using default batch-size zHRequires torch.backends.cudnn.benchmark=False, using default batch-size i   @zCUDA:CUDA_VISIBLE_DEVICES0r   z (z) z.2fz	G total, zG reserved, zG allocated, zG free   )r
            r   )r
   r   r   r    r       @   c                    s   g | ]
}t |d   qS )   )torchempty).0b)r   r   r   
<listcomp>L   s    zautobatch.<locals>.<listcomp>r
   )ndevicer   c                    s~   g | ];\}\}}|r=t |d  ttfr=d|d    k rk r=n n|dks7 |d  r7|d   |d  d  kr||d  gqS )r   r   r
   )
isinstanceintfloat)r&   ixy)resultstr   r   r(   P   s    
,
)degNi   zbatch=z. outside safe range, using default batch-size .zUsing batch-size z for  zG/zG (z.0fu   %) ✅zerror detected: z,  using default batch-size )!r   r   infonext
parametersr*   typewarningr$   backendscudnn	benchmarkosgetenvstripcudaget_device_propertiestotal_memorymemory_reservedmemory_allocatednamer   	enumeratezipnppolyfitr,   roundindexmaxpolyvalempty_cache	Exception)r   r   r   
batch_sizer   prefixr*   gbd
propertiesrafbatch_sizesimgxyfit_xfit_ypr'   r.   er   )r   r1   r2   r   r   &   sZ    


< 
<r   )r   Tr	   r
   )__doc__r>   copyr   numpyrI   r$   ultralytics.utilsr   r   r   ultralytics.utils.torch_utilsr   r   r   r   r   r   r   r   r   <module>   s   
