import warnings

import torch

warnings.warn(
    "The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the "
    "future. Please use the 'torchvision.transforms.functional' module instead."
)

def _is_tensor_video_clip(clip):
    if not torch.is_tensor(clip):
        raise TypeError("clip should be Tensor. Got %s" % type(clip))
    if not clip.ndimension() == 4:
        raise ValueError("clip should be 4D. Got %dD" % clip.dim())
    return True

def crop(clip, i, j, h, w):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
    """
    if len(clip.size()) != 4:
        raise ValueError("clip should be a 4D tensor.")
    return clip[..., i : i + h, j : j + w]

def resize(clip, target_size, interpolation_mode):
    if len(target_size) != 2:
        raise ValueError(f"target size should be tuple (height, width), instead got {target_size}")
    return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False)

def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing to the video clip
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    clip = crop(clip, i, j, h, w)
    clip = resize(clip, size, interpolation_mode)
    return clip

def center_crop(clip, crop_size):
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    h, w = clip.size(-2), clip.size(-1)
    th, tw = crop_size
    if h < th or w < tw:
        raise ValueError("height and width must be no smaller than crop_size")
    i = int(round((h - th) / 2.0))
    j = int(round((w - tw) / 2.0))
    return crop(clip, i, j, th, tw)

def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide value by 255.0 and
    permute the dimensions of clip tensor
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    _is_tensor_video_clip(clip)
    if not clip.dtype == torch.uint8:
        raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
    return clip.float().permute(3, 0, 1, 2) / 255.0

def normalize(clip, mean, std, inplace=False):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
    Returns:
        normalized clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    if not inplace:
        clip = clip.clone()
    mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
    std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
    # mean/std have shape (3,); index to (3, 1, 1, 1) so they broadcast over (C, T, H, W)
    clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
    return clip

def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be flipped. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    return clip.flip(-1)