
    hR                      S r SSKJr  SSKrSSKrSSKrSSKJr  SSKJ	r	J
r
JrJrJr  SSKrSSKrSSKrSSKJrJrJrJrJrJrJrJrJr  SSKJrJrJrJrJ r J!r!J"r"  SSK#J$r$  SS	K%J&r&J'r'  SSK(J)s  J*s  J+r,  SS
K-J+r.  SSK/J0r0  SS
K1J+r2  SSK3J4r4J5r5  SSK6J7r7J8r8J9r9J:r:J;r;J<r<J=r=  SSK>J?r?J@r@  SSKAJBrBJCrCJDrDJErE  SSKFJGrG  / SQrHSrISrJ " S S\@5      rK " S S\@5      rL " S S\@5      rM " S S\@5      rN " S S\@5      rO " S S\@5      rP " S  S!\@5      rQ " S" S#\@5      rR " S$ S%\@5      rS " S& S'\@5      rT " S( S)\@5      rU " S* S+\@5      rV " S, S-\@5      rW " S. S/\@5      rX " S0 S1\@5      rY " S2 S3\@5      rZ " S4 S5\@5      r[ " S6 S7\@5      r\ " S8 S9\@5      r] " S: S;\@5      r^ " S< S=\@5      r_ " S> S?\@5      r` " S@ SA\@5      ra " SB SC\5      rb " SD SE\@5      rc " SF SG\@5      rd " SH SI\@5      re " SJ SK\@5      rf " SL SM\@5      rg " SN SO\@5      rh " SP SQ\@5      ri " SR SS\@5      rj " ST SU\@5      rk " SV SW\@5      rl " SX SY\@5      rm\n" / \2R                  SZ   R                  5       Q\2R                  S[   R                  5       Q76 \q" \2R                  SZ   R                  5       5      \q" \2R                  S[   R                  5       5      S\S]S^.rr " S_ S`\@5      rs " Sa Sb\@5      rt " Sc Sd\5      ru " Se Sf\u5      rv " Sg Sh\u5      rw " Si Sj\u5      rx " Sk Sl\u5      ry\	\\v\w\x\y4   \" SmSn94   rz " So Sp\@5      r{ " Sq Sr\{5      r| " Ss St\@5      r} " Su Sv\@5      r~ " Sw Sx\@5      r " Sy Sz\@5      r " S{ S|\@5      r " S} S~\@5      rg)a(  Pixel-level transformations for image augmentation.

This module contains transforms that modify pixel values without changing the geometry of the image.
Includes transforms for adjusting color, brightness, contrast, adding noise, simulating weather effects,
and other pixel-level manipulations.
"""

from __future__ import annotations

import math
from collections.abc import Sequence
from typing import Annotated, Any, Callable, Union, cast

import cv2
import numpy as np
from albucore import (
    MAX_VALUES_BY_DTYPE,
    NUM_MULTI_CHANNEL_DIMENSIONS,
    batch_transform,
    get_num_channels,
    is_grayscale_image,
    is_rgb_image,
    multiply,
    normalize,
    normalize_per_image,
)
from pydantic import (
    AfterValidator,
    BaseModel,
    ConfigDict,
    Field,
    ValidationInfo,
    field_validator,
    model_validator,
)
from scipy import special
from typing_extensions import Literal, Self

import albumentations.augmentations.geometric.functional as fgeometric
from albumentations.augmentations.blur import functional as fblur
from albumentations.augmentations.blur.transforms import BlurInitSchema
from albumentations.augmentations.pixel import functional as fpixel
from albumentations.augmentations.utils import check_range, non_rgb_error
from albumentations.core.pydantic import (
    NonNegativeFloatRangeType,
    OnePlusFloatRangeType,
    OnePlusIntRangeType,
    SymmetricRangeType,
    ZeroOneRangeType,
    check_range_bounds,
    nondecreasing,
)
from albumentations.core.transforms_interface import BaseTransformInitSchema, ImageOnlyTransform
from albumentations.core.type_definitions import MAX_RAIN_ANGLE, NUM_RGB_CHANNELS, PAIR, SEVEN
from albumentations.core.utils import to_tuple

__all__ = [
    "CLAHE",
    "AdditiveNoise",
    "AutoContrast",
    "ChannelShuffle",
    "ChromaticAberration",
    "ColorJitter",
    "Downscale",
    "Emboss",
    "Equalize",
    "FancyPCA",
    "GaussNoise",
    "HEStain",
    "HueSaturationValue",
    "ISONoise",
    "Illumination",
    "ImageCompression",
    "InvertImg",
    "MultiplicativeNoise",
    "Normalize",
    "PlanckianJitter",
    "PlasmaBrightnessContrast",
    "PlasmaShadow",
    "Posterize",
    "RGBShift",
    "RandomBrightnessContrast",
    "RandomFog",
    "RandomGamma",
    "RandomGravel",
    "RandomRain",
    "RandomShadow",
    "RandomSnow",
    "RandomSunFlare",
    "RandomToneCurve",
    "RingingOvershoot",
    "SaltAndPepper",
    "Sharpen",
    "ShotNoise",
    "Solarize",
    "Spatter",
    "Superpixels",
    "ToGray",
    "ToRGB",
    "ToSepia",
    "UnsharpMask",
]


class Normalize(ImageOnlyTransform):
    """Applies various normalization techniques to an image. The specific normalization technique can be selected
    with the `normalization` parameter.

Standard normalization is applied using the formula:
    `img = (img - mean * max_pixel_value) / (std * max_pixel_value)`.
    Other normalization techniques adjust the image based on global or per-channel statistics,
    or scale pixel values to a specified range.

Args:
    mean (tuple[float, float] | float | None): Mean values for standard normalization.
        For "standard" normalization, the default values are ImageNet mean values: (0.485, 0.456, 0.406).
    std (tuple[float, float] | float | None): Standard deviation values for standard normalization.
        For "standard" normalization, the default values are ImageNet standard deviation: (0.229, 0.224, 0.225).
    max_pixel_value (float | None): Maximum possible pixel value, used for scaling in standard normalization.
        Defaults to 255.0.
    normalization (Literal["standard", "image", "image_per_channel", "min_max", "min_max_per_channel"]):
        Specifies the normalization technique to apply. Defaults to "standard".
        - "standard": Applies the formula `(img - mean * max_pixel_value) / (std * max_pixel_value)`.
            The default mean and std are based on ImageNet. Use mean and std values of (0.5, 0.5, 0.5)
            for Inception normalization, and mean values of (0, 0, 0) with std values of (1, 1, 1) for YOLO.
        - "image": Normalizes the whole image based on its global mean and standard deviation.
        - "image_per_channel": Normalizes the image per channel based on each channel's mean and standard deviation.
        - "min_max": Scales the image pixel values to a [0, 1] range based on the global
            minimum and maximum pixel values.
        - "min_max_per_channel": Scales each channel of the image pixel values to a [0, 1]
            range based on the per-channel minimum and maximum pixel values.

    p (float): Probability of applying the transform. Defaults to 1.0.

Targets:
    image

Image types:
    uint8, float32

Note:
    - For "standard" normalization, `mean`, `std`, and `max_pixel_value` must be provided.
    - For other normalization types, these parameters are ignored.
    - For inception normalization, use mean values of (0.5, 0.5, 0.5).
    - For YOLO normalization, use mean values of (0, 0, 0) and std values of (1, 1, 1).
    - This transform is often used as a final step in image preprocessing pipelines to
      prepare images for neural network input.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> # Standard ImageNet normalization
    >>> transform = A.Normalize(
    ...     mean=(0.485, 0.456, 0.406),
    ...     std=(0.229, 0.224, 0.225),
    ...     max_pixel_value=255.0,
    ...     p=1.0
    ... )
    >>> normalized_image = transform(image=image)["image"]
    >>>
    >>> # Min-max normalization
    >>> transform_minmax = A.Normalize(normalization="min_max", p=1.0)
    >>> normalized_image_minmax = transform_minmax(image=image)["image"]
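    >>>
    >>> # Additional illustrative examples (not in the original docstring): the Note
    >>> # above gives these exact mean/std values for Inception- and YOLO-style pipelines.
    >>> transform_inception = A.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), p=1.0)
    >>> transform_yolo = A.Normalize(mean=(0, 0, 0), std=(1, 1, 1), p=1.0)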

References:
    - ImageNet mean and std: https://pytorch.org/vision/stable/models.html
    - Inception preprocessing: https://keras.io/api/applications/inceptionv3/

    """

    class InitSchema(BaseTransformInitSchema):
        mean: tuple[float, ...] | float | None
        std: tuple[float, ...] | float | None
        max_pixel_value: float | None
        normalization: Literal[
            "standard",
            "image",
            "image_per_channel",
            "min_max",
            "min_max_per_channel",
        ]

        @model_validator(mode="after")
        def _validate_normalization(self) -> Self:
            if (
                self.mean is None or self.std is None or self.max_pixel_value is None
            ) and self.normalization == "standard":
                raise ValueError("mean, std, and max_pixel_value must be provided for standard normalization.")
            return self

    def __init__(
        self,
        mean: tuple[float, ...] | float | None = (0.485, 0.456, 0.406),
        std: tuple[float, ...] | float | None = (0.229, 0.224, 0.225),
        max_pixel_value: float | None = 255.0,
        normalization: Literal[
            "standard",
            "image",
            "image_per_channel",
            "min_max",
            "min_max_per_channel",
        ] = "standard",
        p: float = 1.0,
    ):
        super().__init__(p=p)
        self.mean = mean
        # Pre-compute the scaled mean and the reciprocal of the scaled std so that
        # apply() only needs a multiply instead of a divide per call.
        self.mean_np = np.array(mean, dtype=np.float32) * max_pixel_value
        self.std = std
        self.denominator = np.reciprocal(np.array(std, dtype=np.float32) * max_pixel_value)
        self.max_pixel_value = max_pixel_value
        self.normalization = normalization

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply normalization to the input image.

Args:
    img (np.ndarray): The input image to normalize.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The normalized image.

        """
        if self.normalization == "standard":
            return normalize(
                img,
                self.mean_np,
                self.denominator,
            )
        return normalize_per_image(img, self.normalization)

    @batch_transform("channel", has_batch_dim=True, has_depth_dim=False)
    def apply_to_images(self, images: np.ndarray, **params: Any) -> np.ndarray:
        """Apply normalization to a batch of images.

Args:
    images (np.ndarray): Batch of images to normalize with shape (batch, height, width, channels).
    **params (Any): Additional parameters.

Returns:
    np.ndarray: Normalized batch of images.

        """
        return self.apply(images, **params)

    @batch_transform("channel", has_batch_dim=False, has_depth_dim=True)
    def apply_to_volume(self, volume: np.ndarray, **params: Any) -> np.ndarray:
        """Apply normalization to a 3D volume.

Args:
    volume (np.ndarray): 3D volume to normalize with shape (depth, height, width, channels).
    **params (Any): Additional parameters.

Returns:
    np.ndarray: Normalized 3D volume.

        """
        return self.apply(volume, **params)

    @batch_transform("channel", has_batch_dim=True, has_depth_dim=True)
    def apply_to_volumes(self, volumes: np.ndarray, **params: Any) -> np.ndarray:
        """Apply normalization to a batch of 3D volumes.

Args:
    volumes (np.ndarray): Batch of 3D volumes to normalize with shape (batch, depth, height, width, channels).
    **params (Any): Additional parameters.

Returns:
    np.ndarray: Normalized batch of 3D volumes.

        """
        return self.apply(volumes, **params)


class ImageCompression(ImageOnlyTransform):
    """Decrease image quality by applying JPEG or WebP compression.

This transform simulates the effect of saving an image with lower quality settings,
which can introduce compression artifacts. It's useful for data augmentation and
for testing model robustness against varying image qualities.

Args:
    quality_range (tuple[int, int]): Range for the compression quality.
        The values should be in [1, 100] range, where:
        - 1 is the lowest quality (maximum compression)
        - 100 is the highest quality (minimum compression)
        Default: (99, 100)

    compression_type (Literal["jpeg", "webp"]): Type of compression to apply.
        - "jpeg": JPEG compression
        - "webp": WebP compression
        Default: "jpeg"

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - This transform expects images with 1, 3, or 4 channels.
    - For JPEG compression, alpha channels (4th channel) will be ignored.
    - WebP compression supports transparency (4 channels).
    - The actual file is not saved to disk; the compression is simulated in memory.
    - Lower quality values result in smaller file sizes but may introduce visible artifacts.
    - This transform can be useful for:
      * Data augmentation to improve model robustness
      * Testing how models perform on images of varying quality
      * Simulating images transmitted over low-bandwidth connections

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.ImageCompression(quality_range=(50, 90), compression_type="jpeg", p=1.0)
    >>> result = transform(image=image)
    >>> compressed_image = result["image"]
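    >>> # WebP compression, per the Note above (an illustrative addition,
    >>> # not part of the original example set; WebP keeps a 4th alpha channel)
    >>> transform_webp = A.ImageCompression(quality_range=(50, 90), compression_type="webp", p=1.0)
    >>> webp_image = transform_webp(image=image)["image"]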

References:
    - JPEG compression: https://en.wikipedia.org/wiki/JPEG
    - WebP compression: https://developers.google.com/speed/webp

    """

    class InitSchema(BaseTransformInitSchema):
        quality_range: Annotated[
            tuple[int, int],
            AfterValidator(check_range_bounds(1, 100)),
            AfterValidator(nondecreasing),
        ]
        compression_type: Literal["jpeg", "webp"]

    def __init__(
        self,
        quality_range: tuple[int, int] = (99, 100),
        compression_type: Literal["jpeg", "webp"] = "jpeg",
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.quality_range = quality_range
        self.compression_type = compression_type

    def apply(
        self,
        img: np.ndarray,
        quality: int,
        image_type: Literal[".jpg", ".webp"],
        **params: Any,
    ) -> np.ndarray:
        """Apply compression to the input image.

Args:
    img (np.ndarray): The input image to be compressed.
    quality (int): Compression quality level (1-100).
    image_type (Literal[".jpg", ".webp"]): File extension indicating compression format.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The compressed image.

        """
        return fpixel.image_compression(img, quality, image_type)

    def get_params(self) -> dict[str, int | str]:
        """Generate random parameters for the transform.

Returns:
    dict[str, int | str]: Dictionary with the following keys:
        - "quality" (int): Random quality value within the specified range.
        - "image_type" (str): File extension for the chosen compression type.

        """
        image_type = ".jpg" if self.compression_type == "jpeg" else ".webp"
        return {
            "quality": self.py_random.randint(*self.quality_range),
            "image_type": image_type,
        }


class RandomSnow(ImageOnlyTransform):
    """Applies a random snow effect to the input image.

This transform simulates snowfall by either bleaching out some pixel values or
adding a snow texture to the image, depending on the chosen method.

Args:
    snow_point_range (tuple[float, float]): Range for the snow point threshold.
        Both values should be in the (0, 1) range. Default: (0.1, 0.3).
    brightness_coeff (float): Coefficient applied to increase the brightness of pixels
        below the snow_point threshold. Larger values lead to more pronounced snow effects.
        Should be > 0. Default: 2.5.
    method (Literal["bleach", "texture"]): The snow simulation method to use. Options are:
        - "bleach": Uses a simple pixel value thresholding technique.
        - "texture": Applies a more realistic snow texture overlay.
        Default: "bleach".
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - The "bleach" method increases the brightness of pixels above a certain threshold,
      creating a simple snow effect. This method is faster but may look less realistic.
    - The "texture" method creates a more realistic snow effect through the following steps:
      1. Converts the image to HSV color space for better control over brightness.
      2. Increases overall image brightness to simulate the reflective nature of snow.
      3. Generates a snow texture using Gaussian noise, which is then smoothed with a Gaussian filter.
      4. Applies a depth effect to the snow texture, making it more prominent at the top of the image.
      5. Blends the snow texture with the original image using alpha compositing.
      6. Adds a slight blue tint to simulate the cool color of snow.
      7. Adds random sparkle effects to simulate light reflecting off snow crystals.
      This method produces a more realistic result but is computationally more expensive.

Mathematical Formulation:
    For the "bleach" method:
    Let L be the lightness channel in HLS color space.
    For each pixel (i, j):
    If L[i, j] > snow_point:
        L[i, j] = L[i, j] * brightness_coeff

    For the "texture" method:
    1. Brightness adjustment: V_new = V * (1 + brightness_coeff * snow_point)
    2. Snow texture generation: T = GaussianFilter(GaussianNoise(μ=0.5, sigma=0.3))
    3. Depth effect: D = LinearGradient(1.0 to 0.2)
    4. Final pixel value: P = (1 - alpha) * original_pixel + alpha * (T * D * 255)
       where alpha is the snow intensity factor derived from snow_point.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Default usage (bleach method)
    >>> transform = A.RandomSnow(p=1.0)
    >>> snowy_image = transform(image=image)["image"]

    # Using texture method with custom parameters
    >>> transform = A.RandomSnow(
    ...     snow_point_range=(0.2, 0.4),
    ...     brightness_coeff=2.0,
    ...     method="texture",
    ...     p=1.0
    ... )
    >>> snowy_image = transform(image=image)["image"]

References:
    - Bleach method: https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
    - Texture method: Inspired by computer graphics techniques for snow rendering
      and atmospheric scattering simulations.

c                  @    \ rS rSr% S\S'   \" SS9rS\S'   S\S	'   S
rg)RandomSnow.InitSchemai  gAnnotated[tuple[float, float], AfterValidator(check_range_bounds(0, 1)), AfterValidator(nondecreasing)]snow_point_ranger   gtr   brightness_coeffLiteral['bleach', 'texture']methodru   N)rx   ry   rz   r{   r|   r   r   r}   ru   rt   rq   r~   r     s#    
 	
 #(1+%-,,rt   r~   c                D   > [         TU ]  US9  X l        Xl        X0l        g r   )r   r   r   r   r   )rp   r   r   r   r   r   s        rq   r   RandomSnow.__init__  s&     	1 0 0rt   c                   [        U5        U R                  S:X  a!  [        R                  " XU R                  5      $ U R                  S:X  a$  [        R
                  " UUU R                  UU5      $ [        SU R                   35      e)a  Apply the snow effect to the input image.

Args:
    img (np.ndarray): The input image to apply the snow effect to.
    snow_point (float): The snow point threshold.
    snow_texture (np.ndarray): The snow texture overlay.
    sparkle_mask (np.ndarray): The sparkle mask for the snow effect.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied snow effect.

bleachtexturezUnknown snow method: )r    r   r   add_snow_bleachr   add_snow_texturern   )rp   r   
snow_pointsnow_texturesparkle_maskr   s         rq   r   RandomSnow.apply  s~    * 	c;;("))#4;P;PQQ;;)#**%%  0>??rt   c                    US   SS nU R                   R                  " U R                  6 SSS.nU R                  S:X  a)  [        R
                  " UU R                  S9u  pVXTS'   XdS'   U$ )	a  Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, np.ndarray | None]: Dictionary with the following keys:
        - "snow_point" (np.ndarray | None): The snow point threshold.
        - "snow_texture" (np.ndarray | None): The snow texture overlay.
        - "sparkle_mask" (np.ndarray | None): The sparkle mask for the snow effect.

        """
        image_shape = params["shape"][:2]
        result = {
            "snow_point": self.py_random.uniform(*self.snow_point_range),
            "snow_texture": None,
            "sparkle_mask": None,
        }

        # The texture method needs a pre-generated noise texture and sparkle mask.
        if self.method == "texture":
            snow_texture, sparkle_mask = fpixel.generate_snow_textures(
                img_shape=image_shape,
                random_generator=self.random_generator,
            )
            result["snow_texture"] = snow_texture
            result["sparkle_mask"] = sparkle_mask

        return result


class RandomGravel(ImageOnlyTransform):
    """Adds gravel-like artifacts to the input image.

This transform simulates the appearance of gravel or small stones scattered across
specific regions of an image. It's particularly useful for augmenting datasets of
road or terrain images, adding realistic texture variations.

Args:
    gravel_roi (tuple[float, float, float, float]): Region of interest where gravel
        will be added, specified as (x_min, y_min, x_max, y_max) in relative coordinates
        [0, 1]. Default: (0.1, 0.4, 0.9, 0.9).
    number_of_patches (int): Number of gravel patch regions to generate within the ROI.
        Each patch will contain multiple gravel particles. Default: 2.
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - The gravel effect is created by modifying the saturation channel in the HLS color space.
    - Gravel particles are distributed within randomly generated patches inside the specified ROI.
    - This transform is particularly useful for:
      * Augmenting datasets for road condition analysis
      * Simulating variations in terrain for computer vision tasks
      * Adding realistic texture to synthetic images of outdoor scenes

Mathematical Formulation:
    For each gravel patch:
    1. A rectangular region is randomly generated within the specified ROI.
    2. Within this region, multiple gravel particles are placed.
    3. For each particle:
       - Random (x, y) coordinates are generated within the patch.
       - A random radius (r) between 1 and 3 pixels is assigned.
       - A random saturation value (sat) between 0 and 255 is assigned.
    4. The saturation channel of the image is modified for each particle:
       image_hls[y-r:y+r, x-r:x+r, 1] = sat

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Default usage
    >>> transform = A.RandomGravel(p=1.0)
    >>> augmented_image = transform(image=image)["image"]

    # Custom ROI and number of patches
    >>> transform = A.RandomGravel(
    ...     gravel_roi=(0.2, 0.2, 0.8, 0.8),
    ...     number_of_patches=5,
    ...     p=1.0
    ... )
    >>> augmented_image = transform(image=image)["image"]

    # Combining with other transforms
    >>> transform = A.Compose([
    ...     A.RandomGravel(p=0.7),
    ...     A.RandomBrightnessContrast(p=0.5),
    ... ])
    >>> augmented_image = transform(image=image)["image"]

References:
    - Road surface textures: https://en.wikipedia.org/wiki/Road_surface
    - HLS color space: https://en.wikipedia.org/wiki/HSL_and_HSV

    """

    class InitSchema(BaseTransformInitSchema):
        gravel_roi: tuple[float, float, float, float]
        number_of_patches: int = Field(ge=1)

        @model_validator(mode="after")
        def _validate_gravel_roi(self) -> Self:
            gravel_lower_x, gravel_lower_y, gravel_upper_x, gravel_upper_y = self.gravel_roi
            if not 0 <= gravel_lower_x < gravel_upper_x <= 1 or not 0 <= gravel_lower_y < gravel_upper_y <= 1:
                raise ValueError(f"Invalid gravel_roi. Got: {self.gravel_roi}.")
            return self

    def __init__(
        self,
        gravel_roi: tuple[float, float, float, float] = (0.1, 0.4, 0.9, 0.9),
        number_of_patches: int = 2,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.gravel_roi = gravel_roi
        self.number_of_patches = number_of_patches

    def generate_gravel_patch(self, rectangular_roi: tuple[int, int, int, int]) -> np.ndarray:
        """Generate gravel particles within a specified rectangular region.

Args:
    rectangular_roi (tuple[int, int, int, int]): The rectangular region where gravel
        particles will be generated, specified as (x_min, y_min, x_max, y_max) in pixel coordinates.

Returns:
    np.ndarray: An array of gravel particles with shape (count, 2), where count is the number of particles.
    Each row contains the (x, y) coordinates of a gravel particle.

        """
        x_min, y_min, x_max, y_max = rectangular_roi
        # Particle count scales with the patch area.
        area = abs((x_max - x_min) * (y_max - y_min))
        count = area // 10
        gravels = np.empty([count, 2], dtype=np.int64)
        gravels[:, 0] = self.random_generator.integers(x_min, x_max, count)
        gravels[:, 1] = self.random_generator.integers(y_min, y_max, count)
        return gravels

    def apply(
        self,
        img: np.ndarray,
        gravels_infos: list[Any],
        **params: Any,
    ) -> np.ndarray:
        """Apply the gravel effect to the input image.
Args:
    img (np.ndarray): The input image to apply the gravel effect to.
    gravels_infos (list[Any]): Information about the gravel particles.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied gravel effect.

)r   
add_gravel)rp   r   gravels_infosr   s       rq   r   RandomGravel.apply  s    "   44rt   c                   US   SS u  p4S [        U R                  XCXC/5       5       u  pVpxXu-
  n	X-
  n
/ n[        U R                  5       GHb  nU R                  R                  U	S-  U	S-  5      nU R                  R                  U
S-  U
S-  5      nU R                  R                  XWU-
  5      nU R                  R                  XhU-
  5      nX-  S-  n[        U5       H  nU R                  R                  XU-   5      nU R                  R                  UUU-   5      nU R                  R                  SS	5      nU R                  R                  S
S5      nUR                  [        UU-
  S
5      [        UU-   US-
  5      [        UU-
  S
5      [        UU-   US-
  5      U/5        M     GMe     S[        R                  " U[        R                  S90$ )a>  Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, np.ndarray]: Dictionary with the following keys:
        - "gravels_infos" (np.ndarray): Information about the gravel particles.

        """
        height, width = params["shape"][:2]

        # Convert the relative ROI into pixel coordinates.
        x_min, y_min, x_max, y_max = (
            int(coord * dim) for coord, dim in zip(self.gravel_roi, [width, height, width, height])
        )

        roi_width = x_max - x_min
        roi_height = y_max - y_min

        gravels_info = []

        for _ in range(self.number_of_patches):
            # Pick a random rectangular patch inside the ROI.
            patch_width = self.py_random.randint(roi_width // 10, roi_width // 5)
            patch_height = self.py_random.randint(roi_height // 10, roi_height // 5)

            patch_x = self.py_random.randint(x_min, x_max - patch_width)
            patch_y = self.py_random.randint(y_min, y_max - patch_height)

            # Gravel density is proportional to the patch area.
            num_particles = (patch_width * patch_height) // 100

            for _ in range(num_particles):
                x = self.py_random.randint(patch_x, patch_x + patch_width)
                y = self.py_random.randint(patch_y, patch_y + patch_height)
                r = self.py_random.randint(1, 3)
                sat = self.py_random.randint(0, 255)

                gravels_info.append(
                    [
                        max(y - r, 0),
                        min(y + r, height - 1),
                        max(x - r, 0),
                        min(x + r, width - 1),
                        sat,
                    ],
                )

        return {"gravels_infos": np.array(gravels_info, dtype=np.int64)}


class RandomRain(ImageOnlyTransform):
    """Adds rain effects to an image.

This transform simulates rainfall by overlaying semi-transparent streaks onto the image,
creating a realistic rain effect. It can be used to augment datasets for computer vision
tasks that need to perform well in rainy conditions.

Args:
    slant_range (tuple[float, float]): Range for the rain slant angle in degrees.
        Negative values slant to the left, positive to the right. Default: (-10, 10).
    drop_length (int | None): Length of the rain drops in pixels.
        If None, drop length will be automatically calculated as height // 8.
        This allows the rain effect to scale with the image size.
        Default: None
    drop_width (int): Width of the rain drops in pixels. Default: 1.
    drop_color (tuple[int, int, int]): Color of the rain drops in RGB format. Default: (200, 200, 200).
    blur_value (int): Blur value for simulating rain effect. Rainy views are typically blurry. Default: 7.
    brightness_coefficient (float): Coefficient to adjust the brightness of the image.
        Rainy scenes are usually darker. Should be in the range (0, 1]. Default: 0.7.
    rain_type (Literal["drizzle", "heavy", "torrential", "default"]): Type of rain to simulate.
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - The rain effect is created by drawing semi-transparent lines on the image.
    - The slant of the rain can be controlled to simulate wind effects.
    - Different rain types (drizzle, heavy, torrential) adjust the density and appearance of the rain.
    - The transform also adjusts image brightness and applies a blur to simulate the visual effects of rain.
    - This transform is particularly useful for:
      * Augmenting datasets for autonomous driving in rainy conditions
      * Testing the robustness of computer vision models to weather effects
      * Creating realistic rainy scenes for image editing or film production

Mathematical Formulation:
    For each raindrop:
    1. Start position (x1, y1) is randomly generated within the image.
    2. End position (x2, y2) is calculated based on drop_length and slant:
       x2 = x1 + drop_length * sin(slant)
       y2 = y1 + drop_length * cos(slant)
    3. A line is drawn from (x1, y1) to (x2, y2) with the specified drop_color and drop_width.
    4. The image is then blurred and its brightness is adjusted.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
    # Default usage
    >>> transform = A.RandomRain(p=1.0)
    >>> rainy_image = transform(image=image)["image"]
    # Custom rain parameters
    >>> transform = A.RandomRain(
    ...     slant_range=(-15, 15),
    ...     drop_length=30,
    ...     drop_width=2,
    ...     drop_color=(180, 180, 180),
    ...     blur_value=5,
    ...     brightness_coefficient=0.8,
    ...     p=1.0
    ... )
    >>> rainy_image = transform(image=image)["image"]
    # Simulating heavy rain
    >>> transform = A.RandomRain(rain_type="heavy", p=1.0)
    >>> heavy_rain_image = transform(image=image)["image"]
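    # Simulating drizzle (an illustrative addition, not in the original examples);
    # with drop_length=None the drop length scales with image height, as noted in Args
    >>> transform = A.RandomRain(rain_type="drizzle", drop_length=None, p=1.0)
    >>> drizzle_image = transform(image=image)["image"]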

References:
    - Rain visualization techniques: https://developer.nvidia.com/gpugems/gpugems3/part-iv-image-effects/chapter-27-real-time-rain-rendering
    - Weather effects in computer vision: https://www.sciencedirect.com/science/article/pii/S1077314220300692

    """

    class InitSchema(BaseTransformInitSchema):
        slant_range: Annotated[
            tuple[float, float],
            AfterValidator(nondecreasing),
            AfterValidator(check_range_bounds(-MAX_RAIN_ANGLE, MAX_RAIN_ANGLE)),
        ]
        drop_length: int | None
        drop_width: int = Field(ge=1)
        drop_color: tuple[int, int, int]
        blur_value: int = Field(ge=1)
        brightness_coefficient: float = Field(gt=0, le=1)
        rain_type: Literal["drizzle", "heavy", "torrential", "default"]

    def __init__(
        self,
        slant_range: tuple[float, float] = (-10, 10),
        drop_length: int | None = None,
        drop_width: int = 1,
        drop_color: tuple[int, int, int] = (200, 200, 200),
        blur_value: int = 7,
        brightness_coefficient: float = 0.7,
        rain_type: Literal["drizzle", "heavy", "torrential", "default"] = "default",
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.slant_range = slant_range
        self.drop_length = drop_length
        self.drop_width = drop_width
        self.drop_color = drop_color
        self.blur_value = blur_value
        self.brightness_coefficient = brightness_coefficient
        self.rain_type = rain_type

    def apply(
        self,
        img: np.ndarray,
        slant: float,
        drop_length: int,
        rain_drops: np.ndarray,
        **params: Any,
    ) -> np.ndarray:
        """Apply the rain effect to the input image.

Args:
    img (np.ndarray): The input image to apply the rain effect to.
    slant (float): The slant angle of the rain.
    drop_length (int): The length of the rain drops.
    rain_drops (np.ndarray): The coordinates of the rain drops.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied rain effect.

        """
        non_rgb_error(img)
        return fpixel.add_rain(
            img,
            slant,
            drop_length,
            self.drop_width,
            self.drop_color,
            self.blur_value,
            self.brightness_coefficient,
            rain_drops,
        )

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.
                  6 nUS:  a5  U R                  R                  SS/XCU-
  /US4[        R                  S9nUn	O#[        R                  " S[        R                  S9n	XgU	S.$ )a  Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following keys:
        - "rain_drops" (np.ndarray): The coordinates of the rain drops.

        """
        height, width = params["shape"][:2]

        # Rain density depends on the chosen rain type.
        if self.rain_type == "drizzle":
            num_drops = height // 3
        elif self.rain_type == "heavy":
            num_drops = height
        elif self.rain_type == "torrential":
            num_drops = height * 2
        else:
            num_drops = height // 2

        # If drop_length is None, scale it with the image height.
        drop_length = max(1, height // 8) if self.drop_length is None else self.drop_length

        slant = self.py_random.uniform(*self.slant_range)

        if num_drops > 0:
            # Generate all drop start positions at once.
            rain_drops = self.random_generator.integers(
                low=[0, 0],
                high=[width, height - drop_length],
                size=(num_drops, 2),
                dtype=np.int32,
            )
        else:
            rain_drops = np.empty((0, 2), dtype=np.int32)

        return {"drop_length": drop_length, "slant": slant, "rain_drops": rain_drops}


class RandomFog(ImageOnlyTransform):
    """Simulates fog for the image by adding random fog-like artifacts.
This transform creates a fog effect by generating semi-transparent overlays
that mimic the visual characteristics of fog. The fog intensity and distribution
can be controlled to create various fog-like conditions. An image size dependent
Gaussian blur is applied to the resulting image

Args:
    fog_coef_range (tuple[float, float]): Range for fog intensity coefficient. Should be in [0, 1] range.
    alpha_coef (float): Transparency of the fog circles. Should be in [0, 1] range. Default: 0.08.
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - The fog effect is created by overlaying semi-transparent circles on the image.
    - Higher fog coefficient values result in denser fog effects.
    - The fog is typically denser in the center of the image and gradually decreases towards the edges.
    - Image is blurred to decrease the sharpness
    - This transform is useful for:
      * Simulating various weather conditions in outdoor scenes
      * Data augmentation for improving model robustness to foggy conditions
      * Creating atmospheric effects in image editing

Mathematical Formulation:
    For each fog particle:
    1. A position (x, y) is randomly generated within the image.
    2. A circle with random radius is drawn at this position.
    3. The circle's alpha (transparency) is determined by the alpha_coef.
    4. These circles are overlaid on the original image to create the fog effect.
    5. A Gaussian blur dependent on the shorter dimension is applied

    The final pixel value is calculated as:
    output = blur((1 - alpha) * original_pixel + alpha * fog_color)

    where alpha is influenced by the fog_coef and alpha_coef parameters.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Default usage
    >>> transform = A.RandomFog(p=1.0)
    >>> foggy_image = transform(image=image)["image"]

    # Custom fog intensity range
    >>> transform = A.RandomFog(fog_coef_lower=0.3, fog_coef_upper=0.8, p=1.0)
    >>> foggy_image = transform(image=image)["image"]

    # Adjust fog transparency
    >>> transform = A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.5, alpha_coef=0.1, p=1.0)
    >>> foggy_image = transform(image=image)["image"]

References:
    - Fog: https://en.wikipedia.org/wiki/Fog
    - Atmospheric perspective: https://en.wikipedia.org/wiki/Aerial_perspective

c                  8    \ rS rSr% S\S'   \" SSS9rS\S'   S	rg
)RandomFog.InitSchemai  r   fog_coef_ranger   r   r   rN  r   
alpha_coefru   N)rx   ry   rz   r{   r|   r   rp  r}   ru   rt   rq   r~   rm    s    
 	
 "Q1-
E-rt   r~   c                8   > [         TU ]  US9  X l        Xl        g r   )r   r   rn  rp  )rp   rp  rn  r   r   s       rq   r   RandomFog.__init__  s      	1,$rt   c                `    [        U5        [        R                  " UUU R                  UU5      $ )a  Apply the fog effect to the input image.

Args:
    img (np.ndarray): The input image to apply the fog effect to.
    particle_positions (list[tuple[int, int]]): The coordinates of the fog particles.
    radiuses (list[int]): The radii of the fog particles.
    intensity (float): The intensity of the fog.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied fog effect.

        """
        non_rgb_error(img)
        return fpixel.add_fog(
            img,
            intensity,
            self.alpha_coef,
            particle_positions,
            radiuses,
        )

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.
Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following keys:
        - "intensity" (float): The intensity of the fog.
        - "particle_positions" (list[tuple[int, int]]): The coordinates of the fog particles.
        - "radiuses" (list[int]): The radii of the fog particles.

r   Nr   r   r[   c              3  8   #    U  H  n[        U5      v   M     g 7fr!  r"  )r#  r:  s     rq   r&  9RandomFog.get_params_dependent_on_data.<locals>.<genexpr>[  s     M.Lc!ff.Ls   r   r  r   )ru  rw  rv  )r   r   rn  r-  r   
fgeometriccenterr+  r   r,  r   get_fog_particle_radiuseslenr   )rp   r   r   rw  r   image_heightimage_widthfog_region_sizeru  center_xcenter_ycurrent_widthcurrent_heightshrink_factormax_iterations	iterationr  particles_in_regionr4  r:  r;  rv  s                         rq   r   &RandomFog.get_params_dependent_on_data<  s   & NN**D,?,?@	Wobq)$/! a[A%5	%A!BC Nj.?.?.LM $% 	-.2RW`Wq 1D"%9:YFK# ./NN**1111 NN**2222 #))1a&1 0  ]1B CDM 1}3D!EFNNI/ -.2RW`Wq2 33"#!!	
 #5" 
 	
rt   )rp  rn  )g{Gz?)r   r   r   )rp  r   rn  r   r   r   )r   r   ru  zlist[tuple[int, int]]rv  	list[int]rw  r   r   r   rw   r   rj  r   r   s   @rq   rH   rH     s    AF., . !.6	%% ,% 	% %

 2
 	

 
 
 

<O
O
 O
 
	O
 O
rt   rH   c                     ^  \ rS rSrSr " S S\5      r       S	             S
U 4S jjjr          SS jr      SS jr	Sr
U =r$ )rN   i  uk  Simulates a sun flare effect on the image by adding circles of light.

This transform creates a sun flare effect by overlaying multiple semi-transparent
circles of varying sizes and intensities along a line originating from a "sun" point.
It offers two methods: a simple overlay technique and a more complex physics-based approach.

Args:
    flare_roi (tuple[float, float, float, float]): Region of interest where the sun flare
        can appear. Values are in the range [0, 1] and represent (x_min, y_min, x_max, y_max)
        in relative coordinates. Default: (0, 0, 1, 0.5).
    angle_range (tuple[float, float]): Range of angles (in radians) for the flare direction.
        Values should be in the range [0, 1], where 0 represents 0 radians and 1 represents 2π radians.
        Default: (0, 1).
    num_flare_circles_range (tuple[int, int]): Range for the number of flare circles to generate.
        Default: (6, 10).
    src_radius (int): Radius of the sun circle in pixels. Default: 400.
    src_color (tuple[int, int, int]): Color of the sun in RGB format. Default: (255, 255, 255).
    method (Literal["overlay", "physics_based"]): Method to use for generating the sun flare.
        "overlay" uses a simple alpha blending technique, while "physics_based" simulates
        more realistic optical phenomena. Default: "overlay".

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    The transform offers two methods for generating sun flares:

    1. Overlay Method ("overlay"):
       - Creates a simple sun flare effect using basic alpha blending.
       - Steps:
         a. Generate the main sun circle with a radial gradient.
         b. Create smaller flare circles along the flare line.
         c. Blend these elements with the original image using alpha compositing.
       - Characteristics:
         * Faster computation
         * Less realistic appearance
         * Suitable for basic augmentation or when performance is a priority

    2. Physics-based Method ("physics_based"):
       - Simulates more realistic optical phenomena observed in actual lens flares.
       - Steps:
         a. Create a separate flare layer for complex manipulations.
         b. Add the main sun circle and diffraction spikes to simulate light diffraction.
         c. Generate and add multiple flare circles with varying properties.
         d. Apply Gaussian blur to create a soft, glowing effect.
         e. Create and apply a radial gradient mask for natural fading from the center.
         f. Simulate chromatic aberration by applying different blurs to color channels.
         g. Blend the flare with the original image using screen blending mode.
       - Characteristics:
         * More computationally intensive
         * Produces more realistic and visually appealing results
         * Includes effects like diffraction spikes and chromatic aberration
         * Suitable for high-quality augmentation or realistic image synthesis

Mathematical Formulation:
    For both methods:
    1. Sun position (x_s, y_s) is randomly chosen within the specified ROI.
    2. Flare angle θ is randomly chosen from the angle_range.
    3. For each flare circle i:
       - Position (x_i, y_i) = (x_s + t_i * cos(θ), y_s + t_i * sin(θ))
         where t_i is a random distance along the flare line.
       - Radius r_i is randomly chosen, with larger circles closer to the sun.
       - Alpha (transparency) alpha_i is randomly chosen in the range [0.05, 0.2].
       - Color (R_i, G_i, B_i) is randomly chosen close to src_color.

    Overlay method blending:
    new_pixel = (1 - alpha_i) * original_pixel + alpha_i * flare_color_i

    Physics-based method blending:
    new_pixel = 255 - ((255 - original_pixel) * (255 - flare_pixel) / 255)

    4. Each flare circle is blended with the image using alpha compositing:
       new_pixel = (1 - alpha_i) * original_pixel + alpha_i * flare_color_i

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [1000, 1000, 3], dtype=np.uint8)

    # Default sun flare (overlay method)
    >>> transform = A.RandomSunFlare(p=1.0)
    >>> flared_image = transform(image=image)["image"]

    # Physics-based sun flare with custom parameters
    >>> transform = A.RandomSunFlare(
    ...     flare_roi=(0.1, 0, 0.9, 0.3),
    ...     angle_range=(0.25, 0.75),
    ...     num_flare_circles_range=(5, 15),
    ...     src_radius=200,
    ...     src_color=(255, 200, 100),
    ...     method="physics_based",
    ...     p=1.0
    ... )
    >>> flared_image = transform(image=image)["image"]

References:
    - Lens flare: https://en.wikipedia.org/wiki/Lens_flare
    - Alpha compositing: https://en.wikipedia.org/wiki/Alpha_compositing
    - Diffraction: https://en.wikipedia.org/wiki/Diffraction
    - Chromatic aberration: https://en.wikipedia.org/wiki/Chromatic_aberration
    - Screen blending: https://en.wikipedia.org/wiki/Blend_modes#Screen

c                  z    \ rS rSr% S\S'   \" SS9rS\S'   S\S	'   S
\S'   S\S'   S\S'   \" SS9SS j5       rSr	g)RandomSunFlare.InitSchemai  r   	flare_roir   r   r   
src_radiustuple[int, ...]	src_colorr   angle_rangefAnnotated[tuple[int, int], AfterValidator(check_range_bounds(1, None)), AfterValidator(nondecreasing)]num_flare_circles_range#Literal['overlay', 'physics_based']r   ri   rj   c                    U R                   u  nnnnSUs=::  a  Us=:  a  S::  a  O  OSUs=::  a  Us=:  a  S::  d  O  [        SU R                    35      eU $ )Nr   r   zInvalid flare_roi. Got: )r  rn   )rp   flare_center_lower_xflare_center_lower_yflare_center_upper_xflare_center_upper_ys        rq   _validate_parameters.RandomSunFlare.InitSchema._validate_parameters  sc     $$$$ -I0DII0L3GL1L #;DNN;K!LMMKrt   ru   Nrv   )
rx   ry   rz   r{   r|   r   r  r   r  r}   ru   rt   rq   r~   r    sO    441+
C%""
 	
"
 	

 43	g	&	 
'	rt   r~   c                h   > [         TU ]  US9  X@l        XPl        X l        X0l        Xl        X`l        g r   )r   r   r  r  r  r  r  r   )	rp   r  r  r  r  r  r   r   r   s	           rq   r   RandomSunFlare.__init__(  s5     	1&'>$$""rt   c                @   [        U5        U R                  S:X  a.  [        R                  " UUU R                  U R
                  U5      $ U R                  S:X  a.  [        R                  " UUU R                  U R
                  U5      $ [        SU R                   35      e)a  Apply the sun flare effect to the input image.

Args:
    img (np.ndarray): The input image to apply the sun flare effect to.
    flare_center (tuple[float, float]): The center of the sun.
    circles (list[Any]): The circles to apply the sun flare effect to.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied sun flare effect.

        """
        non_rgb_error(img)
        if self.method == "overlay":
            return fpixel.add_sun_flare_overlay(
                img,
                flare_center,
                self.src_radius,
                self.src_color,
                circles,
            )
        if self.method == "physics_based":
            return fpixel.add_sun_flare_physics_based(
                img,
                flare_center,
                self.src_radius,
                self.src_color,
                circles,
            )
        raise ValueError(f"Invalid method: {self.method}")

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following keys:
        - "circles" (list[Any]): The circles to apply the sun flare effect to.
        - "flare_center" (tuple[float, float]): The center of the sun.

        """
        height, width = params["shape"][:2]
        diagonal = math.sqrt(height**2 + width**2)

        angle = 2 * math.pi * self.py_random.uniform(*self.angle_range)

        # Pick the sun position inside the configured ROI (relative -> pixels).
        x_min, y_min, x_max, y_max = self.flare_roi
        flare_center_x = int(width * self.py_random.uniform(x_min, x_max))
        flare_center_y = int(height * self.py_random.uniform(y_min, y_max))

        num_circles = self.py_random.randint(*self.num_flare_circles_range)

        # Scale step size and circle radius with the image size.
        step_size = max(1, int(diagonal * 0.01))
        max_radius = max(2, int(height * 0.01))
        color_range = int(max(self.src_color) * 0.2)

        def line(t: float) -> tuple[float, float]:
            return (
                flare_center_x + t * math.cos(angle),
                flare_center_y + t * math.sin(angle),
            )

        # Sample candidate points along the flare line.
        t_range = range(-flare_center_x, width - flare_center_x, step_size)
        points = [line(t) for t in t_range]

        circles = []
        for _ in range(num_circles):
            alpha = self.py_random.uniform(0.05, 0.2)
            point = self.py_random.choice(points)
            rad = self.py_random.randint(1, max_radius)

            colors = [self.py_random.randint(max(c - color_range, 0), c) for c in self.src_color]

            circles.append(
                (
                    alpha,
                    (int(point[0]), int(point[1])),
                    pow(rad, 3),
                    tuple(colors),
                ),
            )

        return {
            "circles": circles,
            "flare_center": (flare_center_x, flare_center_y),
        }


class RandomShadow(ImageOnlyTransform):
    """Simulates shadows for the image by reducing the brightness of the image in shadow regions.

This transform adds realistic shadow effects to images, which can be useful for augmenting
datasets for outdoor scene analysis, autonomous driving, or any computer vision task where
shadows may be present.

Args:
    shadow_roi (tuple[float, float, float, float]): Region of the image where shadows
        will appear (x_min, y_min, x_max, y_max). All values should be in range [0, 1].
        Default: (0, 0.5, 1, 1).
    num_shadows_limit (tuple[int, int]): Lower and upper limits for the possible number of shadows.
        Default: (1, 2).
    shadow_dimension (int): Number of edges in the shadow polygons. Default: 5.
    shadow_intensity_range (tuple[float, float]): Range for the shadow intensity. Larger value
        means darker shadow. Should be two float values between 0 and 1. Default: (0.5, 0.5).
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - Shadows are created by generating random polygons within the specified ROI and
      reducing the brightness of the image in these areas.
    - The number of shadows, their shapes, and intensities can be randomized for variety.
    - This transform is particularly useful for:
      * Augmenting datasets for outdoor scene understanding
      * Improving robustness of object detection models to shadowed conditions
      * Simulating different lighting conditions in synthetic datasets

Mathematical Formulation:
    For each shadow:
    1. A polygon with `shadow_dimension` vertices is generated within the shadow ROI.
    2. The shadow intensity a is randomly chosen from `shadow_intensity_range`.
    3. For each pixel (x, y) within the polygon:
       new_pixel_value = original_pixel_value * (1 - a)

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Default usage
    >>> transform = A.RandomShadow(p=1.0)
    >>> shadowed_image = transform(image=image)["image"]

    # Custom shadow parameters
    >>> transform = A.RandomShadow(
    ...     shadow_roi=(0.2, 0.2, 0.8, 0.8),
    ...     num_shadows_limit=(2, 4),
    ...     shadow_dimension=8,
    ...     shadow_intensity_range=(0.3, 0.7),
    ...     p=1.0
    ... )
    >>> shadowed_image = transform(image=image)["image"]

    # Combining with other transforms
    >>> transform = A.Compose([
    ...     A.RandomShadow(p=0.5),
    ...     A.RandomBrightnessContrast(p=0.5),
    ... ])
    >>> augmented_image = transform(image=image)["image"]

References:
    - Shadow detection and removal: https://www.sciencedirect.com/science/article/pii/S1047320315002035
    - Shadows in computer vision: https://en.wikipedia.org/wiki/Shadow_detection

    """

    class InitSchema(BaseTransformInitSchema):
        shadow_roi: tuple[float, float, float, float]
        num_shadows_limit: Annotated[
            tuple[int, int],
            AfterValidator(check_range_bounds(1, None)),
            AfterValidator(nondecreasing),
        ]
        shadow_dimension: int = Field(ge=3)
        shadow_intensity_range: Annotated[
            tuple[float, float],
            AfterValidator(check_range_bounds(0, 1)),
            AfterValidator(nondecreasing),
        ]

        @model_validator(mode="after")
        def _validate_shadows(self) -> Self:
            shadow_lower_x, shadow_lower_y, shadow_upper_x, shadow_upper_y = self.shadow_roi

            if not 0 <= shadow_lower_x <= shadow_upper_x <= 1 or not 0 <= shadow_lower_y <= shadow_upper_y <= 1:
                raise ValueError(f"Invalid shadow_roi. Got: {self.shadow_roi}")

            return self

    def __init__(
        self,
        shadow_roi: tuple[float, float, float, float] = (0, 0.5, 1, 1),
        num_shadows_limit: tuple[int, int] = (1, 2),
        shadow_dimension: int = 5,
        shadow_intensity_range: tuple[float, float] = (0.5, 0.5),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.shadow_roi = shadow_roi
        self.shadow_dimension = shadow_dimension
        self.num_shadows_limit = num_shadows_limit
        self.shadow_intensity_range = shadow_intensity_range

    def apply(
        self,
        img: np.ndarray,
        vertices_list: list[np.ndarray],
        intensities: np.ndarray,
        **params: Any,
    ) -> np.ndarray:
        """Apply the shadow effect to the input image.

Args:
    img (np.ndarray): The input image to apply the shadow effect to.
    vertices_list (list[np.ndarray]): The vertices of the shadow polygons.
    intensities (np.ndarray): The intensities of the shadows.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied shadow effect.

        """
        return fpixel.add_shadow(img, vertices_list, intensities)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, list[np.ndarray]]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, list[np.ndarray]]: Dictionary with the following keys:
        - "vertices_list" (list[np.ndarray]): The vertices of the shadow polygons.
        - "intensities" (np.ndarray): The intensities of the shadows.

r   Nr   )ra  r   axisra  )r  r  )r   r   r  r  r   r+  r   stackr   r  r  r   r  )rp   r   r   r/  r0  num_shadowsr  r  r  r  r4  r  r  s                rq   r   )RandomShadow.get_params_dependent_on_data.  s7   " w+nn,,d.D.DE%)__"eEM"EM"EN#EN#$ ;'!
  ( HH))22!22 3 
 ))22!22 3   (! 	 
( ++33((


 "/KK3
s   >A'D)r  r  r  r  ))r   r   r   r   )r   r   r(  r   r   r   )
r  r   r  r   r  r   r  r   r   r   )
r   r   r  zlist[np.ndarray]r  r   r   r   rw   r   )r   r   r   r   rw   zdict[str, list[np.ndarray]]r   r   s   @rq   rL   rL     s    HT, 4 9G-3 !6@=5= += 	=
 !4= = =BB (B  	B
 B 
B*5L5L 5L 
%	5L 5Lrt   rL   c                     ^  \ rS rSrSr " S S\5      r   S	     S
U 4S jjjr          SS jr      SS jr	Sr
U =r$ )rO   if  u  Randomly change the relationship between bright and dark areas of the image by manipulating its tone curve.

This transform applies a random S-curve to the image's tone curve, adjusting the brightness and contrast
in a non-linear manner. It can be applied to the entire image or to each channel separately.

Args:
    scale (float): Standard deviation of the normal distribution used to sample random distances
        to move two control points that modify the image's curve. Values should be in range [0, 1].
        Higher values will result in more dramatic changes to the image. Default: 0.1
    per_channel (bool): If True, the tone curve will be applied to each channel of the input image separately,
        which can lead to color distortion. If False, the same curve is applied to all channels,
        preserving the original color relationships. Default: False
    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - This transform modifies the image's histogram by applying a smooth, S-shaped curve to it.
    - The S-curve is defined by moving two control points of a quadratic Bézier curve.
    - When per_channel is False, the same curve is applied to all channels, maintaining color balance.
    - When per_channel is True, different curves are applied to each channel, which can create color shifts.
    - This transform can be used to adjust image contrast and brightness in a more natural way than linear
        transforms.
    - The effect can range from subtle contrast adjustments to more dramatic "vintage" or "faded" looks.

Mathematical Formulation:
    1. Two control points are randomly moved from their default positions (0.25, 0.25) and (0.75, 0.75).
    2. The new positions are sampled from a normal distribution: N(μ, σ²), where μ is the original position
    and σ is the scale parameter.
    3. These points, along with fixed points at (0, 0) and (1, 1), define a quadratic Bézier curve.
    4. The curve is applied as a lookup table to the image intensities:
       new_intensity = curve(original_intensity)

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)

    # Apply a random tone curve to all channels together
    >>> transform = A.RandomToneCurve(scale=0.1, per_channel=False, p=1.0)
    >>> augmented_image = transform(image=image)['image']

    # Apply random tone curves to each channel separately
    >>> transform = A.RandomToneCurve(scale=0.2, per_channel=True, p=1.0)
    >>> augmented_image = transform(image=image)['image']

References:
    - "What Else Can Fool Deep Learning? Addressing Color Constancy Errors on Deep Neural Network Performance":
      https://arxiv.org/abs/1912.06960
    - Bézier curve: https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B%C3%A9zier_curves
    - Tone mapping: https://en.wikipedia.org/wiki/Tone_mapping

    """

    class InitSchema(BaseTransformInitSchema):
        scale: float = Field(ge=0, le=1)
        per_channel: bool

    def __init__(
        self,
        scale: float = 0.1,
        per_channel: bool = False,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.scale = scale
        self.per_channel = per_channel

    def apply(
        self,
        img: np.ndarray,
        low_y: float | np.ndarray,
        high_y: float | np.ndarray,
        **params: Any,
    ) -> np.ndarray:
        """Apply the tone curve to the input image.

Args:
    img (np.ndarray): The input image to apply the tone curve to.
    low_y (float | np.ndarray): The lower control point of the tone curve.
    high_y (float | np.ndarray): The upper control point of the tone curve.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied tone curve.

        """
        return fpixel.move_tone_curve(img, low_y, high_y)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.
a  Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following keys:
        - "low_y" (float | np.ndarray): The lower control point of the tone curve.
        - "high_y" (float | np.ndarray): The upper control point of the tone curve.

imager   r   r         ?)locr  ra  g      ?)r  r  )r  r  )r   r  r   clipr   normalr  )rp   r   r   r  num_channelsr  r  s          rq   r   ,RandomToneCurve.get_params_dependent_on_data  s#   " ")DWd8nQ6G'. 1))00 "jj*_ 1 
  ''))00 "jj*_ 1 
  * --44TZZ4PRSUVW..55$djj5QSTVWX11rt   )r  r  )r   Fr   )r  r   r  r  r   r   )
r   r   r  float | np.ndarrayr  r  r   r   rw   r   rj  r   r   s   @rq   rO   rO   f  s    ;z,  !	'' ' 	' ':: ": #	:
 : 
:*.2.2 .2 
	.2 .2rt   rO   c                     ^  \ rS rSrSr " S S\5      r    S	       S
U 4S jjjr            SS jrSS jr	Sr
U =r$ )r;   i  a  Randomly change hue, saturation and value of the input image.

This transform adjusts the HSV (Hue, Saturation, Value) channels of an input RGB image.
It allows for independent control over each channel, providing a wide range of color
and brightness modifications.

Args:
    hue_shift_limit (float | tuple[float, float]): Range for changing hue.
        If a single float value is provided, the range will be (-hue_shift_limit, hue_shift_limit).
        Values should be in the range [-180, 180]. Default: (-20, 20).

    sat_shift_limit (float | tuple[float, float]): Range for changing saturation.
        If a single float value is provided, the range will be (-sat_shift_limit, sat_shift_limit).
        Values should be in the range [-255, 255]. Default: (-30, 30).

    val_shift_limit (float | tuple[float, float]): Range for changing value (brightness).
        If a single float value is provided, the range will be (-val_shift_limit, val_shift_limit).
        Values should be in the range [-255, 255]. Default: (-20, 20).

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - The transform first converts the input RGB image to the HSV color space.
    - Each channel (Hue, Saturation, Value) is adjusted independently.
    - Hue is circular, so it wraps around at 180 degrees.
    - For float32 images, the shift values are applied as percentages of the full range.
    - This transform is particularly useful for color augmentation and simulating
      different lighting conditions.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.HueSaturationValue(
    ...     hue_shift_limit=20,
    ...     sat_shift_limit=30,
    ...     val_shift_limit=20,
    ...     p=0.7
    ... )
    >>> result = transform(image=image)
    >>> augmented_image = result["image"]

References:
    HSV color space: https://en.wikipedia.org/wiki/HSL_and_HSV

c                  4    \ rS rSr% S\S'   S\S'   S\S'   Srg)HueSaturationValue.InitSchemai4  r$   hue_shift_limitsat_shift_limitval_shift_limitru   Nr   ru   rt   rq   r~   r  4  s    ++++++rt   r~   c                   > [         TU ]  US9  [        SU5      U l        [        SU5      U l        [        SU5      U l        g Nr   r   )r   r   r	   r  r   r  )rp   r  r   r  r   r   s        rq   r   HueSaturationValue.__init__9  sE     	1#$9?K#$9?K#$9?Krt   c                    [        U5      (       d  [        U5      (       d  Sn[        U5      e[        R                  " XX45      $ )a  Apply the hue, saturation, and value shifts to the input image.

Args:
    img (np.ndarray): The input image to apply the hue, saturation, and value shifts to.
    hue_shift (int): The hue shift value.
    sat_shift (int): The saturation shift value.
    val_shift (int): The value (brightness) shift value.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied hue, saturation, and value shifts.

        """
        if not is_rgb_image(img) and not is_grayscale_image(img):
            msg = "HueSaturationValue transformation expects 1-channel or 3-channel images."
            raise TypeError(msg)
        return fpixel.shift_hsv(img, hue_shift, sat_shift, val_shift)

    def get_params(self) -> dict[str, float]:
        """Generate parameters dependent on the input data.

Returns:
    dict[str, float]: Dictionary with the following keys:
        - "hue_shift" (float): The hue shift value.
        - "sat_shift" (float): The saturation shift value.
        - "val_shift" (float): The value (brightness) shift value.

)r  r	  r
  )r   r   r  r   r  ro   s    rq   r   HueSaturationValue.get_params_  sV     //1E1EF//1E1EF//1E1EF
 	
rt   )r  r   r  )ir\   )i   r  r   )r  tuple[float, float] | floatr   r  r  r  r   r   )r   r   r  r   r	  r   r
  r   r   r   rw   r   rw   dict[str, float]r   r   s   @rq   r;   r;     s    6p,, , 8A7@7@
L4
L 5
L 5	
L
 
L 
LFF F 	F
 F F 
F4
 
rt   r;   c                  h   ^  \ rS rSrSr " S S\5      r  S	   S
U 4S jjjrSS jrSS jr	Sr
U =r$ )rT   ip  a
  Invert all pixel values above a threshold.

This transform applies a solarization effect to the input image. Solarization is a phenomenon in
photography in which the image recorded on a negative or on a photographic print is wholly or
partially reversed in tone. Dark areas appear light or light areas appear dark.

In this implementation, all pixel values above a threshold are inverted.

Args:
    threshold_range (tuple[float, float]): Range for solarizing threshold as a fraction
        of maximum value. The threshold_range should be in the range [0, 1] and will be multiplied by the
        maximum value of the image type (255 for uint8 images or 1.0 for float images).
        Default: (0.5, 0.5) (corresponds to 127.5 for uint8 and 0.5 for float32).
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - For uint8 images, pixel values above the threshold are inverted as: 255 - pixel_value
    - For float32 images, pixel values above the threshold are inverted as: 1.0 - pixel_value
    - The threshold is applied to each channel independently
    - The threshold is calculated in two steps:
      1. Sample a value from threshold_range
      2. Multiply by the image's maximum value:
         * For uint8: threshold = sampled_value * 255
         * For float32: threshold = sampled_value * 1.0
    - This transform can create interesting artistic effects or be used for data augmentation

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>>
    # Solarize uint8 image with fixed threshold at 50% of max value (127.5)
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.Solarize(threshold_range=(0.5, 0.5), p=1.0)
    >>> solarized_image = transform(image=image)['image']
    >>>
    # Solarize uint8 image with random threshold between 40-60% of max value (102-153)
    >>> transform = A.Solarize(threshold_range=(0.4, 0.6), p=1.0)
    >>> solarized_image = transform(image=image)['image']
    >>>
    # Solarize float32 image at 50% of max value (0.5)
    >>> image = np.random.rand(100, 100, 3).astype(np.float32)
    >>> transform = A.Solarize(threshold_range=(0.5, 0.5), p=1.0)
    >>> solarized_image = transform(image=image)['image']

Mathematical Formulation:
    Let f be a value sampled from threshold_range (min, max).
    For each pixel value p:
    threshold = f * max_value
    if p > threshold:
        p_new = max_value - p
    else:
        p_new = p

    Where max_value is 255 for uint8 images and 1.0 for float32 images.

See Also:
    Invert: For inverting all pixel values regardless of a threshold.

    """

    class InitSchema(BaseTransformInitSchema):
        threshold_range: ZeroOneRangeType

    def __init__(
        self,
        threshold_range: tuple[float, float] = (0.5, 0.5),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.threshold_range = threshold_range

    def apply(self, img: np.ndarray, threshold: float, **params: Any) -> np.ndarray:
        """Apply the solarize effect to the input image.

Args:
    img (np.ndarray): The input image to apply the solarize effect to.
    threshold (float): The threshold value.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied solarize effect.

        """
        return fpixel.solarize(img, threshold)

    def get_params(self) -> dict[str, float]:
        """Generate parameters dependent on the input data.

Returns:
    dict[str, float]: Dictionary with the following key:
        - "threshold" (float): The threshold value.

r  )r   r   r  ro   s    rq   r   Solarize.get_params  s$     T^^33T5I5IJKKrt   )r  )r  r   )r  r   r   r   )r   r   r  r   r   r   rw   r   r  r   r   s   @rq   rT   rT   p  sL    CJ
, 
 0:/,/ / //L Lrt   rT   c                  x   ^  \ rS rSrSr " S S\5      r  S	   S
U 4S jjjr        SS jrSS jr	Sr
U =r$ )rE   i  a
  Reduces the number of bits for each color channel in the image.

This transform applies color posterization, a technique that reduces the number of distinct
colors used in an image. It works by lowering the number of bits used to represent each
color channel, effectively creating a "poster-like" effect with fewer color gradations.

Args:
    num_bits (int | tuple[int, int] | list[int] | list[tuple[int, int]]):
        Defines the number of bits to keep for each color channel. Can be specified in several ways:
        - Single int: Same number of bits for all channels. Range: [1, 7].
        - tuple of two ints: (min_bits, max_bits) to randomly choose from. Range for each: [1, 7].
        - list of three ints: Specific number of bits for each channel [r_bits, g_bits, b_bits].
        - list of three tuples: Ranges for each channel [(r_min, r_max), (g_min, g_max), (b_min, b_max)].
        Default: 4

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - The effect becomes more pronounced as the number of bits is reduced.
    - This transform can create interesting artistic effects or be used for image compression simulation.
    - Posterization is particularly useful for:
      * Creating stylized or retro-looking images
      * Reducing the color palette for specific artistic effects
      * Simulating the look of older or lower-quality digital images
      * Data augmentation in scenarios where color depth might vary

Mathematical Background:
    For an 8-bit color channel, posterization to n bits can be expressed as:
    new_value = (old_value >> (8 - n)) << (8 - n)
    This operation keeps the n most significant bits and sets the rest to zero.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Posterize all channels to 3 bits
    >>> transform = A.Posterize(num_bits=3, p=1.0)
    >>> posterized_image = transform(image=image)["image"]

    # Randomly posterize between 2 and 5 bits
    >>> transform = A.Posterize(num_bits=(2, 5), p=1.0)
    >>> posterized_image = transform(image=image)["image"]

    # Different bits for each channel
    >>> transform = A.Posterize(num_bits=[3, 5, 2], p=1.0)
    >>> posterized_image = transform(image=image)["image"]

    # Range of bits for each channel
    >>> transform = A.Posterize(num_bits=[(1, 3), (3, 5), (2, 4)], p=1.0)
    >>> posterized_image = transform(image=image)["image"]

References:
    - Color Quantization: https://en.wikipedia.org/wiki/Color_quantization
    - Posterization: https://en.wikipedia.org/wiki/Posterization

    """

    class InitSchema(BaseTransformInitSchema):
        num_bits: int | tuple[int, int] | list[tuple[int, int]]

        @field_validator("num_bits")
        @classmethod
        def _validate_num_bits(
            cls,
            num_bits: Any,
        ) -> tuple[int, int] | list[tuple[int, int]]:
            if isinstance(num_bits, int):
                if num_bits < 1 or num_bits > SEVEN:
                    raise ValueError("num_bits must be in the range [1, 7]")
                return (num_bits, num_bits)
            if isinstance(num_bits, Sequence) and len(num_bits) > PAIR:
                return [to_tuple(i, i) for i in num_bits]
            return cast("tuple[int, int]", to_tuple(num_bits, num_bits))

    def __init__(
        self,
        num_bits: int | tuple[int, int] | list[tuple[int, int]] = 4,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.num_bits = cast("Union[tuple[int, int], list[tuple[int, int]]]", num_bits)

    def apply(
        self,
        img: np.ndarray,
        num_bits: Literal[1, 2, 3, 4, 5, 6, 7] | list[Literal[1, 2, 3, 4, 5, 6, 7]],
        **params: Any,
    ) -> np.ndarray:
        """Apply the posterize effect to the input image.

Args:
    img (np.ndarray): The input image to apply the posterize effect to.
    num_bits (Literal[1, 2, 3, 4, 5, 6, 7] | list[Literal[1, 2, 3, 4, 5, 6, 7]]):
        The number of bits to keep for each color channel.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied posterize effect.

        """
        return fpixel.posterize(img, num_bits)

    def get_params(self) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Returns:
    dict[str, Any]: Dictionary with the following key:
        - "num_bits" (Literal[1, 2, 3, 4, 5, 6, 7] | list[Literal[1, 2, 3, 4, 5, 6, 7]]):
            The number of bits to keep for each color channel.

        """
        if isinstance(self.num_bits, list):
            num_bits = [self.py_random.randint(*i) for i in self.num_bits]
            return {"num_bits": num_bits}
        return {"num_bits": self.py_random.randint(*self.num_bits)}
SS j5       rS	rU =r$ )r7   i^  a  Equalize the image histogram.

This transform applies histogram equalization to the input image. Histogram equalization
is a method in image processing of contrast adjustment using the image's histogram.

Args:
    mode (Literal['cv', 'pil']): Use OpenCV or Pillow equalization method.
        Default: 'cv'
    by_channels (bool): If True, use equalization by channels separately,
        else convert image to YCbCr representation and use equalization by `Y` channel.
        Default: True
    mask (np.ndarray, callable): If given, only the pixels selected by
        the mask are included in the analysis. Can be:
        - A 1-channel or 3-channel numpy array of the same size as the input image.
        - A callable (function) that generates a mask. The function should accept 'image'
          as its first argument, and can accept additional arguments specified in mask_params.
        Default: None
    mask_params (list[str]): Additional parameters to pass to the mask function.
        These parameters will be taken from the data dict passed to __call__.
        Default: ()
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    1,3

Note:
    - When mode='cv', OpenCV's equalizeHist() function is used.
    - When mode='pil', Pillow's equalize() function is used.
    - The 'by_channels' parameter determines whether equalization is applied to each color channel
      independently (True) or to the luminance channel only (False).
    - If a mask is provided as a numpy array, it should have the same height and width as the input image.
    - If a mask is provided as a function, it allows for dynamic mask generation based on the input image
      and additional parameters. This is useful for scenarios where the mask depends on the image content
      or external data (e.g., bounding boxes, segmentation masks).

Mask Function:
    When mask is a callable, it should have the following signature:
    mask_func(image, *args) -> np.ndarray

    - image: The input image (numpy array)
    - *args: Additional arguments as specified in mask_params

    The function should return a numpy array of the same height and width as the input image,
    where non-zero pixels indicate areas to be equalized.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>>
    >>> # Using a static mask
    >>> mask = np.random.randint(0, 2, (100, 100), dtype=np.uint8)
    >>> transform = A.Equalize(mask=mask, p=1.0)
    >>> result = transform(image=image)
    >>>
    >>> # Using a dynamic mask function
    >>> def mask_func(image, bboxes):
    ...     mask = np.ones_like(image[:, :, 0], dtype=np.uint8)
    ...     for bbox in bboxes:
    ...         x1, y1, x2, y2 = map(int, bbox)
    ...         mask[y1:y2, x1:x2] = 0  # Exclude areas inside bounding boxes
    ...     return mask
    >>>
    >>> transform = A.Equalize(mask=mask_func, mask_params=['bboxes'], p=1.0)
    >>> bboxes = [(10, 10, 50, 50), (60, 60, 90, 90)]  # Example bounding boxes
    >>> result = transform(image=image, bboxes=bboxes)

References:
    - OpenCV equalizeHist: https://docs.opencv.org/3.4/d6/dc7/group__imgproc__hist.html#ga7e54091f0c937d49bf84152a16f76d6e
    - Pillow ImageOps.equalize: https://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.equalize
    - Histogram Equalization: https://en.wikipedia.org/wiki/Histogram_equalization

c                  >    \ rS rSr% S\S'   S\S'   S\S'   S\S	'   S
rg)Equalize.InitSchemai  Literal['cv', 'pil']rk   r  by_channels&np.ndarray | Callable[..., Any] | NonemaskSequence[str]mask_paramsru   Nr   ru   rt   rq   r~   r7    s    ""44""rt   r~   c                P   > [         TU ]  US9  Xl        X l        X0l        X@l        g r   )r   r   rk   r9  r;  r=  )rp   rk   r9  r;  r=  r   r   s         rq   r   Equalize.__init__  s+     	1	&	&rt   c                    [        U5      (       d  [        U5      (       d  [        S5      e[        R                  " UU R
                  U R                  US9$ )a\  Apply the equalization effect to the input image.

Args:
    img (np.ndarray): The input image to apply the equalization effect to.
    mask (np.ndarray): The mask to apply the equalization effect to.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied equalization effect.

        """
        if not is_rgb_image(img) and not is_grayscale_image(img):
            raise ValueError("Equalize transform is only supported for RGB and grayscale images.")
        return fpixel.equalize(
            img,
            mode=self.mode,
            by_channels=self.by_channels,
            mask=mask,
        )

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following key:
        - "mask" (np.ndarray): The mask to apply the equalization effect to.

r;  r  zRequired parameter 'z'' for mask function is missing in data.ru   )callabler;  r=  KeyError)rp   r   r   r=  keys        rq   r   %Equalize.get_params_dependent_on_data  s      		""DII&&W.##C*3%/VW   $yK $ 		0K011rt   c                0    / [        U R                  5      Q$ )zxReturn the list of parameters that are used for generating the mask.

Returns:
    list[str]: List of parameter names.

        """
        return [*list(self.mask_params)]
 jrSrU =r$ )rG   i	  u  Randomly changes the brightness and contrast of the input image.

This transform adjusts the brightness and contrast of an image simultaneously, allowing for
a wide range of lighting and contrast variations. It's particularly useful for data augmentation
in computer vision tasks, helping models become more robust to different lighting conditions.

Args:
    brightness_limit (float | tuple[float, float]): Factor range for changing brightness.
        If a single float value is provided, the range will be (-brightness_limit, brightness_limit).
        Values should typically be in the range [-1.0, 1.0], where 0 means no change,
        1.0 means maximum brightness, and -1.0 means minimum brightness.
        Default: (-0.2, 0.2).

    contrast_limit (float | tuple[float, float]): Factor range for changing contrast.
        If a single float value is provided, the range will be (-contrast_limit, contrast_limit).
        Values should typically be in the range [-1.0, 1.0], where 0 means no change,
        1.0 means maximum increase in contrast, and -1.0 means maximum decrease in contrast.
        Default: (-0.2, 0.2).

    brightness_by_max (bool): If True, adjusts brightness by scaling pixel values up to the
        maximum value of the image's dtype. If False, uses the mean pixel value for adjustment.
        Default: True.

    ensure_safe_range (bool): If True, adjusts alpha and beta to prevent overflow/underflow.
        This ensures output values stay within the valid range for the image dtype without clipping.
        Default: False.

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - The order of operation is: contrast adjustment, then brightness adjustment.
    - For uint8 images, the output is clipped to [0, 255] range.
    - For float32 images, the output is clipped to [0, 1] range.
    - The `brightness_by_max` parameter affects how brightness is adjusted:
      * If True, brightness adjustment is more pronounced and can lead to more saturated results.
      * If False, brightness adjustment is more subtle and preserves the overall lighting better.
    - This transform is useful for:
      * Simulating different lighting conditions
      * Enhancing low-light or overexposed images
      * Data augmentation to improve model robustness

Mathematical Formulation:
    Let α be the contrast adjustment factor and β be the brightness adjustment factor.
    For each pixel value x:
    1. Contrast adjustment: x' = clip((x - mean) * (1 + α) + mean)
    2. Brightness adjustment:
       If brightness_by_max is True:  x'' = clip(x' * (1 + β))
       If brightness_by_max is False: x'' = clip(x' + β * max_value)
    Where clip() ensures values stay within the valid range for the image dtype.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Default usage
    >>> transform = A.RandomBrightnessContrast(p=1.0)
    >>> augmented_image = transform(image=image)["image"]

    # Custom brightness and contrast limits
    >>> transform = A.RandomBrightnessContrast(
    ...     brightness_limit=0.3,
    ...     contrast_limit=0.3,
    ...     p=1.0
    ... )
    >>> augmented_image = transform(image=image)["image"]

    # Adjust brightness based on mean value
    >>> transform = A.RandomBrightnessContrast(
    ...     brightness_limit=0.2,
    ...     contrast_limit=0.2,
    ...     brightness_by_max=False,
    ...     p=1.0
    ... )
    >>> augmented_image = transform(image=image)["image"]

References:
    - Brightness: https://en.wikipedia.org/wiki/Brightness
    - Contrast: https://en.wikipedia.org/wiki/Contrast_(vision)

    """

    class InitSchema(BaseTransformInitSchema):
        brightness_limit: SymmetricRangeType
        contrast_limit: SymmetricRangeType
        brightness_by_max: bool
        ensure_safe_range: bool

    def __init__(
        self,
        brightness_limit: tuple[float, float] | float = (-0.2, 0.2),
        contrast_limit: tuple[float, float] | float = (-0.2, 0.2),
        brightness_by_max: bool = True,
        ensure_safe_range: bool = False,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.brightness_limit = cast("tuple[float, float]", brightness_limit)
        self.contrast_limit = cast("tuple[float, float]", contrast_limit)
        self.brightness_by_max = brightness_by_max
        self.ensure_safe_range = ensure_safe_range

    def apply(
        self,
        img: np.ndarray,
        alpha: float,
        beta: float,
        **params: Any,
    ) -> np.ndarray:
        """Apply the brightness and contrast adjustment to the input image.
Args:
    img (np.ndarray): The input image to apply the brightness and contrast adjustment to.
    alpha (float): The contrast adjustment factor.
    beta (float): The brightness adjustment factor.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied brightness and contrast adjustment.

        """
        return albucore.multiply_add(img, alpha, beta, inplace=False)

    def apply_to_images(self, images: np.ndarray, *args: Any, **params: Any) -> np.ndarray:
        """Apply the brightness and contrast adjustment to a batch of images.

Args:
    images (np.ndarray): The batch of images to apply the brightness and contrast adjustment to.
    *args (Any): Additional arguments.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The batch of images with the applied brightness and contrast adjustment.

        """
        return self.apply(images, *args, **params)

    def apply_to_volumes(self, volumes: np.ndarray, *args: Any, **params: Any) -> np.ndarray:
        """Apply the brightness and contrast adjustment to a batch of volumes.

Args:
    volumes (np.ndarray): The batch of volumes to apply the brightness and contrast adjustment to.
    *args (Any): Additional arguments.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The batch of volumes with the applied brightness and contrast adjustment.

        """
        return self.apply(volumes, *args, **params)

    def apply_to_volume(self, volume: np.ndarray, *args: Any, **params: Any) -> np.ndarray:
        """Apply the brightness and contrast adjustment to a single volume.

Args:
    volume (np.ndarray): The volume to apply the brightness and contrast adjustment to.
    *args (Any): Additional arguments.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volume with the applied brightness and contrast adjustment.

        """
        return self.apply(volume, *args, **params)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, float]:
        """Generate parameters dependent on the input data.
                     nU R                  (       a  XV-  OU[        R                  " U5      -  nU R                  (       a  [        R                  " UUU5      u  pEUUS.$ )a`  Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, float]: Dictionary with the following keys:
        - "alpha" (float): The contrast adjustment factor.
        - "beta" (float): The brightness adjustment factor.

r  r   r   r   )r  rZ  )r   r   rQ  rP  r
   r   rR  r   rc   rS  r   #get_safe_brightness_contrast_params)rp   r   r   r  r  rZ  	max_values          rq   r   5RandomBrightnessContrast.get_params_dependent_on_data	  s    " ")DWd8nQ6G dnn,,d.A.ABB~~%%t'<'<='4	#'#9#9ttbggen?T !! DDKE 
 	
rt   )rR  rP  rQ  rS  )gɿr  rl  TFr   )
rP  r  rQ  r  rR  r  rS  r  r   r   )
r   r   r  r   rZ  r   r   r   rw   r   r   r   r^  r   r   r   rw   r   r   r   r^  r   r   r   rw   r   r   r   r^  r   r   r   rw   r   r   r   r   r   rw   r  )rx   ry   rz   r{   r   r(   r~   r   r   r   r   r   r   r}   r   r   s   @rq   rG   rG   	  s    Yv ,   9D6A"&"'353 43  	3
  3 3 3FF F 	F
 F 
F*343&
&
 &
 
	&
 &
rt   rG   c                     ^  \ rS rSrSr " S S\5      r     S	         S
U 4S jjjr        SS jr      SS jr	Sr
U =r$ )r9   i	  a^  Apply Gaussian noise to the input image.

Args:
    std_range (tuple[float, float]): Range for noise standard deviation as a fraction
        of the maximum value (255 for uint8 images or 1.0 for float images).
        Values should be in range [0, 1]. Default: (0.2, 0.44).
    mean_range (tuple[float, float]): Range for noise mean as a fraction
        of the maximum value (255 for uint8 images or 1.0 for float images).
        Values should be in range [-1, 1]. Default: (0.0, 0.0).
    per_channel (bool): If True, noise will be sampled for each channel independently.
        Otherwise, the noise will be sampled once for all channels. Default: True.
    noise_scale_factor (float): Scaling factor for noise generation. Value should be in the range (0, 1].
        When set to 1, noise is sampled for each pixel independently. If less, noise is sampled for a smaller size
        and resized to fit the shape of the image. Smaller values make the transform faster. Default: 1.0.
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - The noise parameters (std_range and mean_range) are normalized to [0, 1] range:
      * For uint8 images, they are multiplied by 255
      * For float32 images, they are used directly
    - Setting per_channel=False is faster but applies the same noise to all channels
    - The noise_scale_factor parameter allows for a trade-off between transform speed and noise granularity

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
    >>>
    >>> # Apply Gaussian noise with normalized std_range
    >>> transform = A.GaussNoise(std_range=(0.1, 0.2), p=1.0)  # 10-20% of max value
    >>> noisy_image = transform(image=image)['image']

c                  L    \ rS rSr% S\S'   S\S'   S\S'   \" SS	S
9rS\S'   Srg)GaussNoise.InitSchemai
  r   	std_rangezhAnnotated[tuple[float, float], AfterValidator(check_range_bounds(-1, 1)), AfterValidator(nondecreasing)]
mean_ranger  r  r   r   rM  r   noise_scale_factorru   N)rx   ry   rz   r{   r|   r   rv  r}   ru   rt   rq   r~   rs  
  s0    
 	


 	

 $)Q1$5E5rt   r~   c                P   > [         TU ]  US9  Xl        X l        X0l        X@l        g r   )r   r   rt  ru  r  rv  )rp   rt  ru  r  rv  r   r   s         rq   r   GaussNoise.__init__
  s+     	1"$&"4rt   c                .    [         R                  " X5      $ )aI  Apply the Gaussian noise to the input image.

Args:
    img (np.ndarray): The input image to apply the Gaussian noise to.
    noise_map (np.ndarray): The noise map to apply to the image.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied Gaussian noise.

        """
        return fpixel.add_noise(img, noise_map)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, float]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, float]: Dictionary with the following key:
        - "noise_map" (np.ndarray): The noise map to apply to the image.

r  r   r   gaussian	per_pixelsharedru  rt  
noise_typespatial_moder   r   rj  approximationr   r}  )r
   r   r   r   rt  ru  r   generate_noiser  r   rv  r   )rp   r   r   r  rj  sigmarc   r}  s           rq   r   'GaussNoise.get_params_dependent_on_data4
  s      ")DWd8nQ6G'4	&&7~~%%t7))!(,(8(8h++#',e^L11!22
	 Y''rt   )ru  rv  r  rt  ))r  g)\(?        r  Tr   r   )
rt  r   ru  r   r  r  rv  r   r   r   r   r   r}  r   r   r   rw   r   rp  r   r   s   @rq   r9   r9   	  s    )V6, 6  *5*4 $%5&5 (5 	5
 "5 5 500 0 	0
 
0& ( (  ( 
	 (  (rt   r9   c                     ^  \ rS rSrSr " S S\5      r   S	     S
U 4S jjjr            SS jr      SS jr	Sr
U =r$ )r<   iW
  u  Applies camera sensor noise to the input image, simulating high ISO settings.

This transform adds random noise to an image, mimicking the effect of using high ISO settings
in digital photography. It simulates two main components of ISO noise:
1. Color noise: random shifts in color hue
2. Luminance noise: random variations in pixel intensity

Args:
    color_shift (tuple[float, float]): Range for changing color hue.
        Values should be in the range [0, 1], where 1 represents a full 360° hue rotation.
        Default: (0.01, 0.05)

    intensity (tuple[float, float]): Range for the noise intensity.
        Higher values increase the strength of both color and luminance noise.
        Default: (0.1, 0.5)

    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - This transform only works with RGB images. It will raise a TypeError if applied to
      non-RGB images.
    - The color shift is applied in the HSV color space, affecting the hue channel.
    - Luminance noise is added to all channels independently.
    - This transform can be useful for data augmentation in low-light scenarios or when
      training models to be robust against noisy inputs.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.ISONoise(color_shift=(0.01, 0.05), intensity=(0.1, 0.5), p=0.5)
    >>> result = transform(image=image)
    >>> noisy_image = result["image"]

References:
    ISO noise in digital photography: https://en.wikipedia.org/wiki/Image_noise#In_digital_cameras

c                  *    \ rS rSr% S\S'   S\S'   Srg)ISONoise.InitSchemai
  r   color_shiftjAnnotated[tuple[float, float], AfterValidator(check_range_bounds(0, None)), AfterValidator(nondecreasing)]rw  ru   Nr   ru   rt   rq   r~   r  
  s    
 	


 	
rt   r~   c                8   > [         TU ]  US9  X l        Xl        g r   )r   r   rw  r  )rp   r  rw  r   r   s       rq   r   ISONoise.__init__
  s      	1"&rt   c                    [        U5        [        R                  " UUU[        R                  R                  U5      5      $ )a  Apply the ISONoise transform to the input image.

Args:
    img (np.ndarray): The input image to apply the ISONoise transform to.
    color_shift (float): The color shift value.
    intensity (float): The intensity value.
    random_seed (int): The random seed.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied ISONoise transform.

        """
        non_rgb_error(img)
        return fpixel.iso_noise(
            img,
            color_shift,
            intensity,
            np.random.default_rng(random_seed),
        )

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following keys:
        - "color_shift" (float): The color shift value.
        - "intensity" (float): The intensity value.
        - "random_seed" (int): The random seed.

r       )r  rw  r  )r   r  r   r   r  rw  )rp   r   r   r  s       rq   r   %ISONoise.get_params_dependent_on_data
  sW    $ ++44Q	B>>1143C3CD//@&
 	
rt   )r  rw  ))r  r  r   r   r   )r  r   rw  r   r   r   )r   r   r  r   rw  r   r  r   r   r   rw   r   rj  r   r   s   @rq   r<   r<   W
  s    .`

, 

 ,8)3	'(' '' 	' '

 
 	

 
 
 

:

 
 
	
 
rt   r<   c                  n   ^  \ rS rSrSr " S S\5      r   S	     S
U 4S jjjrSS jrSS jr	Sr
U =r$ )r/   i
  a  Apply Contrast Limited Adaptive Histogram Equalization (CLAHE) to the input image.

CLAHE is an advanced method of improving the contrast in an image. Unlike regular histogram
equalization, which operates on the entire image, CLAHE operates on small regions (tiles)
in the image. This results in a more balanced equalization, preventing over-amplification
of contrast in areas with initially low contrast.

Args:
    clip_limit (tuple[float, float] | float): Controls the contrast enhancement limit.
        - If a single float is provided, the range will be (1, clip_limit).
        - If a tuple of two floats is provided, it defines the range for random selection.
        Higher values allow for more contrast enhancement, but may also increase noise.
        Default: (1, 4)

    tile_grid_size (tuple[int, int]): Defines the number of tiles in the row and column directions.
        Format is (rows, columns). Smaller tile sizes can lead to more localized enhancements,
        while larger sizes give results closer to global histogram equalization.
        Default: (8, 8)

    p (float): Probability of applying the transform. Default: 0.5

Notes:
    - Supports only RGB or grayscale images.
    - For color images, CLAHE is applied to the L channel in the LAB color space.
    - The clip limit determines the maximum slope of the cumulative histogram. A lower
      clip limit will result in more contrast limiting.
    - Tile grid size affects the adaptiveness of the method. More tiles increase local
      adaptiveness but can lead to an unnatural look if set too high.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    1, 3

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.CLAHE(clip_limit=(1, 4), tile_grid_size=(8, 8), p=1.0)
    >>> result = transform(image=image)
    >>> clahe_image = result["image"]

References:
    - Tutorial: https://docs.opencv.org/master/d5/daf/tutorial_py_histogram_equalization.html
    - "Contrast Limited Adaptive Histogram Equalization.": https://ieeexplore.ieee.org/document/109340

c                  *    \ rS rSr% S\S'   S\S'   Srg)CLAHE.InitSchemai
  r"   
clip_limitzGAnnotated[tuple[int, int], AfterValidator(check_range_bounds(1, None))]tile_grid_sizeru   Nr   ru   rt   rq   r~   r  
  s    ))__rt   r~   c                N   > [         TU ]  US9  [        SU5      U l        X l        g r  )r   r   r	   r  r  )rp   r  r  r   r   s       rq   r   CLAHE.__init__  s*     	14jA,rt   c                    [        U5      (       d  [        U5      (       d  Sn[        U5      e[        R                  " XU R
                  5      $ )aC  Apply the CLAHE transform to the input image.

Args:
    img (np.ndarray): The input image to apply the CLAHE transform to.
    clip_limit (float): The contrast enhancement limit.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied CLAHE transform.

        """
        if not is_rgb_image(img) and not is_grayscale_image(img):
            msg = "CLAHE transformation expects 1-channel or 3-channel images."
            raise TypeError(msg)
        return fpixel.clahe(img, clip_limit, self.tile_grid_size)

    def get_params(self) -> dict[str, float]:
        """Generate parameters dependent on the input data.

Returns:
    dict[str, float]: Dictionary with the following key:
        - "clip_limit" (float): The contrast enhancement limit.

r  )r   r   r  ro   s    rq   r   CLAHE.get_params*  s"     dnn44dooFGGrt   )r  r  )g      @)r^  r^  r   )r  r  r  r   r   r   )r   r   r  r   r   r   rw   r   r  r   r   s   @rq   r/   r/   
  s[    2h`, ` 36*0	-/- (- 	- -B$H Hrt   r/   c                  f    \ rS rSrSr        S
S jrSS jrSS jrSS jr      SS jr	Sr
g	)r2   i5  a  Randomly rearrange channels of the image.

Args:
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Number of channels:
    Any

Image types:
    uint8, float32

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>>
    >>> # Create a sample image with distinct RGB channels
    >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
    >>> # Red channel (first channel)
    >>> image[:, :, 0] = np.linspace(0, 255, 100, dtype=np.uint8).reshape(1, 100)
    >>> # Green channel (second channel)
    >>> image[:, :, 1] = np.linspace(0, 255, 100, dtype=np.uint8).reshape(100, 1)
    >>> # Blue channel (third channel) - constant value
    >>> image[:, :, 2] = 128
    >>>
    >>> # Apply channel shuffle transform
    >>> transform = A.ChannelShuffle(p=1.0)
    >>> result = transform(image=image)
    >>> shuffled_image = result['image']
    >>>
    >>> # The channels have been randomly rearranged
    >>> # For example, the original order [R, G, B] might become [G, B, R] or [B, R, G]
    >>> # This results in a color shift while preserving all the original image data
    >>> # Note: For images with more than 3 channels, all channels are shuffled similarly

c                8    Uc  U$ [         R                  " X5      $ )ai  Apply the ChannelShuffle transform to the input image.

Args:
    img (np.ndarray): The input image to apply the ChannelShuffle transform to.
    channels_shuffled (list[int] | None): The channels to shuffle.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied ChannelShuffle transform.

)r   channel_shuffle)rp   r   channels_shuffledr   s       rq   r   ChannelShuffle.apply]  s     " $J%%c==rt   c                8    Uc  U$ [         R                  " X5      $ )ao  Apply the ChannelShuffle transform to the input images.

Args:
    images (np.ndarray): The input images to apply the ChannelShuffle transform to.
    channels_shuffled (list[int] | None): The channels to shuffle.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The images with the applied ChannelShuffle transform.

)r   volume_channel_shuffle)rp   r   r  r   s       rq   r   ChannelShuffle.apply_to_imagesr  s      $M,,VGGrt   c                8    Uc  U$ [         R                  " X5      $ )as  Apply the ChannelShuffle transform to the input volumes.

Args:
    volumes (np.ndarray): The input volumes to apply the ChannelShuffle transform to.
    channels_shuffled (list[int] | None): The channels to shuffle.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volumes with the applied ChannelShuffle transform.

)r   volumes_channel_shuffle)rp   r   r  r   s       rq   r   ChannelShuffle.apply_to_volumes  s      $N--gIIrt   c                (    U R                   " X40 UD6$ )ao  Apply the ChannelShuffle transform to the input volume.

Args:
    volume (np.ndarray): The input volume to apply the ChannelShuffle transform to.
    channels_shuffled (list[int] | None): The channels to shuffle.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volume with the applied ChannelShuffle transform.

        """
        return self.apply(volume, channels_shuffled, **params)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following key:
        - "channels_shuffled" (tuple[int, ...] | None): The channels to shuffle.

r   r   r   r  N)r  r2  r+  r   shuffle)rp   r   r   r   ch_arrs        rq   r   +ChannelShuffle.get_params_dependent_on_data  s]      wu:?eBi1n'..eE"I&'v&#V,,rt   ru   N)r   r   r  list[int] | Noner   r   rw   r   )r   r   r  r  r   r   rw   r   )r   r   r  r  r   r   rw   r   )r   r   r  r  r   r   rw   r   rj  )rx   ry   rz   r{   r   r   r   r   r   r   r}   ru   rt   rq   r2   r2   5  sf    %N>> ,> 	>
 
>*H J I-- - 
	-rt   r2   c                  @    \ rS rSrSrS	S jrS
S jrSS jrSS jrSr	g)r?   i  uR  Invert the input image by subtracting pixel values from max values of the image types,
i.e., 255 for uint8 and 1.0 for float32.

Args:
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create a sample image with different elements
    >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
    >>> cv2.circle(image, (30, 30), 20, (255, 255, 255), -1)  # White circle
    >>> cv2.rectangle(image, (60, 60), (90, 90), (128, 128, 128), -1)  # Gray rectangle
    >>>
    >>> # Apply InvertImg transform
    >>> transform = A.InvertImg(p=1.0)
    >>> result = transform(image=image)
    >>> inverted_image = result['image']
    >>>
    >>> # Result:
    >>> # - Black background becomes white (0 → 255)
    >>> # - White circle becomes black (255 → 0)
    >>> # - Gray rectangle is inverted (128 → 127)
    >>> # The same approach works for float32 images (0-1 range) and grayscale images
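    >>>
    >>> # Inversion is an involution (illustrative check): applying the transform
    >>> # twice restores the original image
    >>> restored = transform(image=inverted_image)['image']
    >>> assert np.array_equal(restored, image)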

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the InvertImg transform to the input image.

Args:
    img (np.ndarray): The input image to apply the InvertImg transform to.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied InvertImg transform.

        """
        return fpixel.invert(img)

    def apply_to_images(self, images: np.ndarray, *args: Any, **params: Any) -> np.ndarray:
        """Apply the InvertImg transform to the input images.

Args:
    images (np.ndarray): The input images to apply the InvertImg transform to.
    *args (Any): Additional arguments (not used in this transform).
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The images with the applied InvertImg transform.

        """
        return self.apply(images, *args, **params)

    def apply_to_volumes(self, volumes: np.ndarray, *args: Any, **params: Any) -> np.ndarray:
        """Apply the InvertImg transform to the input volumes.

Args:
    volumes (np.ndarray): The input volumes to apply the InvertImg transform to.
    *args (Any): Additional arguments (not used in this transform).
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volumes with the applied InvertImg transform.

        """
        return self.apply(volumes, *args, **params)

    def apply_to_volume(self, volume: np.ndarray, *args: Any, **params: Any) -> np.ndarray:
        """Apply the InvertImg transform to the input volume.

Args:
    volume (np.ndarray): The input volume to apply the InvertImg transform to.
    *args (Any): Additional arguments (not used in this transform).
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volume with the applied InvertImg transform.

        """
        return self.apply(volume, *args, **params)


class RandomGamma(ImageOnlyTransform):
    """Applies random gamma correction to the input image.

Gamma correction, or simply gamma, is a nonlinear operation used to encode and decode luminance
or tristimulus values in imaging systems. This transform can adjust the brightness of an image
while preserving the relative differences between darker and lighter areas, making it useful
for simulating different lighting conditions or correcting for display characteristics.

Args:
    gamma_limit (float | tuple[float, float]): If gamma_limit is a single float value, the range
        will be (1, gamma_limit). If it's a tuple of two floats, they will serve as
        the lower and upper bounds for gamma adjustment. Values are in terms of percentage change,
        e.g., (80, 120) means the gamma will be between 80% and 120% of the original.
        Default: (80, 120).
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - The gamma correction is applied using the formula: output = input^gamma
    - Gamma values > 1 will make the image darker, while values < 1 will make it brighter
    - This transform is particularly useful for:
      * Simulating different lighting conditions
      * Correcting for non-linear display characteristics
      * Enhancing contrast in certain regions of the image
      * Data augmentation in computer vision tasks

Mathematical Formulation:
    Let I be the input image and G (gamma) be the correction factor.
    The gamma correction is applied as follows:
    1. Normalize the image to [0, 1] range: I_norm = I / 255 (for uint8 images)
    2. Apply gamma correction: I_corrected = I_norm ^ (1 / G)
    3. Scale back to original range: output = I_corrected * 255 (for uint8 images)

    The actual gamma value used is calculated as:
    G = random_value / 100, where random_value is sampled uniformly from the gamma_limit range.
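
    Worked example (illustrative): with the default gamma_limit=(80, 120), G is sampled
    from [0.8, 1.2]. For a uint8 mid-gray pixel of 128, G = 1.2 gives
    (128 / 255) ** 1.2 * 255 ~= 112 (slightly darker), while G = 0.8 gives ~147 (brighter).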

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    >>> # Default usage
    >>> transform = A.RandomGamma(p=1.0)
    >>> augmented_image = transform(image=image)["image"]

    >>> # Custom gamma range
    >>> transform = A.RandomGamma(gamma_limit=(50, 150), p=1.0)
    >>> augmented_image = transform(image=image)["image"]

    >>> # Applying with other transforms
    >>> transform = A.Compose([
    ...     A.RandomGamma(gamma_limit=(80, 120), p=0.5),
    ...     A.RandomBrightnessContrast(p=0.5),
    ... ])
    >>> augmented_image = transform(image=image)["image"]

References:
    - Gamma correction: https://en.wikipedia.org/wiki/Gamma_correction
    - Power law (Gamma) encoding: https://www.cambridgeincolour.com/tutorials/gamma-correction.htm

    class InitSchema(BaseTransformInitSchema):
        gamma_limit: OnePlusFloatRangeType

    def __init__(
        self,
        gamma_limit: tuple[float, float] | float = (80, 120),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.gamma_limit = cast("tuple[float, float]", gamma_limit)

    def apply(self, img: np.ndarray, gamma: float, **params: Any) -> np.ndarray:
        """Apply the RandomGamma transform to the input image.

Args:
    img (np.ndarray): The input image to apply the RandomGamma transform to.
    gamma (float): The gamma value.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied RandomGamma transform.

        """
        return fpixel.gamma_transform(img, gamma=gamma)

    def apply_to_volume(self, volume: np.ndarray, gamma: float, **params: Any) -> np.ndarray:
        """Apply the RandomGamma transform to the input volume.

Args:
    volume (np.ndarray): The input volume to apply the RandomGamma transform to.
    gamma (float): The gamma value.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volume with the applied RandomGamma transform.

        """
        return self.apply(volume, gamma=gamma)

    def apply_to_volumes(self, volumes: np.ndarray, gamma: float, **params: Any) -> np.ndarray:
        """Apply the RandomGamma transform to the input volumes.

Args:
    volumes (np.ndarray): The input volumes to apply the RandomGamma transform to.
    gamma (float): The gamma value.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The volumes with the applied RandomGamma transform.

        """
        return self.apply(volumes, gamma=gamma)

    def apply_to_images(self, images: np.ndarray, gamma: float, **params: Any) -> np.ndarray:
        """Apply the RandomGamma transform to the input images.

Args:
    images (np.ndarray): The input images to apply the RandomGamma transform to.
    gamma (float): The gamma value.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The images with the applied RandomGamma transform.

        """
        return self.apply(images, gamma=gamma)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Args:
    params (dict[str, Any]): Parameters from the previous transform.
    data (dict[str, Any]): Input data.

Returns:
    dict[str, Any]: Dictionary with the following key:
        - "gamma" (float): The gamma value.

r  g      Y@)r   r   r  rp   r   r   s      rq   r   (RandomGamma.get_params_dependent_on_data  s-     T^^++T-=-=>F
 	
rt   )r  ))P   x   r   )r  r  r   r   )r   r   r  r   r   r   rw   r   )r   r   r  r   r   r   rw   r   )r   r   r  r   r   r   rw   r   )r   r   r  r   r   r   rw   r   rj  )rx   ry   rz   r{   r   r(   r~   r   r   r   r   r   r   r}   r   r   s   @rq   rI   rI     s]    EN+, +
 4=D0D D D8/0/
 
rt   rI   c                  d   ^  \ rS rSrSr " S S\5      r   S     S	U 4S jjjrS
S jrSr	U =r
$ )rW   i  a  Convert an image to grayscale and optionally replicate the grayscale channel.

This transform first converts a color image to a single-channel grayscale image using various methods,
then replicates the grayscale channel if num_output_channels is greater than 1.

Args:
    num_output_channels (int): The number of channels in the output image. If greater than 1,
        the grayscale channel will be replicated. Default: 3.
    method (Literal["weighted_average", "from_lab", "desaturation", "average", "max", "pca"]):
        The method used for grayscale conversion:
        - "weighted_average": Uses a weighted sum of RGB channels (0.299R + 0.587G + 0.114B).
          Works only with 3-channel images. Provides realistic results based on human perception.
        - "from_lab": Extracts the L channel from the LAB color space.
          Works only with 3-channel images. Gives perceptually uniform results.
        - "desaturation": Averages the maximum and minimum values across channels.
          Works with any number of channels. Fast but may not preserve perceived brightness well.
        - "average": Simple average of all channels.
          Works with any number of channels. Fast but may not give realistic results.
        - "max": Takes the maximum value across all channels.
          Works with any number of channels. Tends to produce brighter results.
        - "pca": Applies Principal Component Analysis to reduce channels.
          Works with any number of channels. Can preserve more information but is computationally intensive.
    p (float): Probability of applying the transform. Default: 0.5.

Raises:
    TypeError: If the input image doesn't have 3 channels for methods that require it.

Note:
    - The transform first converts the input image to single-channel grayscale, then replicates
      this channel if num_output_channels > 1.
    - "weighted_average" and "from_lab" are typically used in image processing and computer vision
      applications where accurate representation of human perception is important.
    - "desaturation" and "average" are often used in simple image manipulation tools or when
      computational speed is a priority.
    - "max" method can be useful in scenarios where preserving bright features is important,
      such as in some medical imaging applications.
    - "pca" might be used in advanced image analysis tasks or when dealing with hyperspectral images.

Image types:
    uint8, float32

Returns:
    np.ndarray: Grayscale image with the specified number of channels.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create a sample color image with distinct RGB values
    >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
    >>> # Red square in top-left
    >>> image[10:40, 10:40, 0] = 200
    >>> # Green square in top-right
    >>> image[10:40, 60:90, 1] = 200
    >>> # Blue square in bottom-left
    >>> image[60:90, 10:40, 2] = 200
    >>> # Yellow square in bottom-right (Red + Green)
    >>> image[60:90, 60:90, 0] = 200
    >>> image[60:90, 60:90, 1] = 200
    >>>
    >>> # Example 1: Default conversion (weighted average, 3 channels)
    >>> transform = A.ToGray(p=1.0)
    >>> result = transform(image=image)
    >>> gray_image = result['image']
    >>> # Output has 3 duplicate channels with values based on RGB perception weights
    >>> # R=0.299, G=0.587, B=0.114
    >>> assert gray_image.shape == (100, 100, 3)
    >>> assert np.allclose(gray_image[:, :, 0], gray_image[:, :, 1])
    >>> assert np.allclose(gray_image[:, :, 1], gray_image[:, :, 2])
    >>>
    >>> # Example 2: Single-channel output
    >>> transform = A.ToGray(num_output_channels=1, p=1.0)
    >>> result = transform(image=image)
    >>> gray_image = result['image']
    >>> assert gray_image.shape == (100, 100, 1)
    >>>
    >>> # Example 3: Using different conversion methods
    >>> # "desaturation" method (min+max)/2
    >>> transform_desaturate = A.ToGray(
    ...     method="desaturation",
    ...     p=1.0
    ... )
    >>> result = transform_desaturate(image=image)
    >>> gray_desaturate = result['image']
    >>>
    >>> # "from_lab" method (using L channel from LAB colorspace)
    >>> transform_lab = A.ToGray(
    ...     method="from_lab",
    ...     p=1.0
    ... )
    >>> result = transform_lab(image=image)
    >>> gray_lab = result['image']
    >>>
    >>> # "average" method (simple average of channels)
    >>> transform_avg = A.ToGray(
    ...     method="average",
    ...     p=1.0
    ... )
    >>> result = transform_avg(image=image)
    >>> gray_avg = result['image']
    >>>
    >>> # "max" method (takes max value across channels)
    >>> transform_max = A.ToGray(
    ...     method="max",
    ...     p=1.0
    ... )
    >>> result = transform_max(image=image)
    >>> gray_max = result['image']
    >>>
    >>> # Example 4: Using grayscale in an augmentation pipeline
    >>> pipeline = A.Compose([
    ...     A.ToGray(p=0.5),           # 50% chance of grayscale conversion
    ...     A.RandomBrightnessContrast(p=1.0)  # Always apply brightness/contrast
    ... ])
    >>> result = pipeline(image=image)
    >>> augmented_image = result['image']  # May be grayscale or color
    >>>
    >>> # Example 5: Converting float32 image
    >>> float_image = image.astype(np.float32) / 255.0  # Range [0, 1]
    >>> transform = A.ToGray(p=1.0)
    >>> result = transform(image=float_image)
    >>> gray_float_image = result['image']
    >>> assert gray_float_image.dtype == np.float32
    >>> assert gray_float_image.max() <= 1.0

c                  8    \ rS rSr% \" SSS9rS\S'   S\S'   S	rg
)ToGray.InitSchemai4  zThe number of output channels.r   )descriptionr   r   num_output_channelsPLiteral['weighted_average', 'from_lab', 'desaturation', 'average', 'max', 'pca']r   ru   Nrx   ry   rz   r{   r   r  r|   r}   ru   rt   rq   r~   r  4  s$    #(8$
S 	

 	
rt   r~   c                8   > [         TU ]  US9  Xl        X l        g r   )r   r   r  r   )rp   r  r   r   r   s       rq   r   ToGray.__init__B  s      	1#6 rt   c                
   [        U5      (       a  [        R                  " SSS9  U$ [        U5      nU[        :w  a  U R
                  S;  a  Sn[        U5      e[        R                  " XR                  U R
                  5      $ )a  Apply the ToGray transform to the input image.

Args:
    img (np.ndarray): The input image to apply the ToGray transform to.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied ToGray transform.

        """
        if is_grayscale_image(img):
            warnings.warn("The image is already gray.", stacklevel=2)
            return img

        num_channels = get_num_channels(img)
        if num_channels != NUM_RGB_CHANNELS and self.method not in {"desaturation", "average", "max", "pca"}:
            msg = "ToGray transformation expects 3-channel images."
            raise TypeError(msg)

        return fpixel.to_gray(img, self.num_output_channels, self.method)


class ToRGB(ImageOnlyTransform):
    """Convert an input image from grayscale to RGB format.

Args:
    num_output_channels (int): The number of channels in the output image. Default: 3.
    p (float): Probability of applying the transform. Default: 1.0.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    1

Note:
    - For single-channel (grayscale) images, the channel is replicated to create an RGB image.
    - If the input is already a 3-channel RGB image, it is returned unchanged.
    - This transform does not change the data type of the image (e.g., uint8 remains uint8).

Raises:
    TypeError: If the input image has more than 1 channel.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>>
    >>> # Convert a grayscale image to RGB
    >>> transform = A.Compose([A.ToRGB(p=1.0)])
    >>> grayscale_image = np.random.randint(0, 256, (100, 100), dtype=np.uint8)
    >>> rgb_image = transform(image=grayscale_image)['image']
    >>> assert rgb_image.shape == (100, 100, 3)

    """

    class InitSchema(BaseTransformInitSchema):
        num_output_channels: int = Field(ge=1)

    def __init__(self, num_output_channels: int = 3, p: float = 1.0):
        super().__init__(p=p)
        self.num_output_channels = num_output_channels

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the ToRGB transform to the input image.

Args:
    img (np.ndarray): The input image to apply the ToRGB transform to.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied ToRGB transform.

        """
        if is_rgb_image(img):
            warnings.warn("The image is already an RGB.", stacklevel=2)
            return np.ascontiguousarray(img)
        if not is_grayscale_image(img):
            msg = "ToRGB transformation expects 2-dim images or 3-dim with the last dimension equal to 1."
            raise TypeError(msg)

        return fpixel.grayscale_to_multichannel(
            img,
            num_output_channels=self.num_output_channels,
        )


class ToSepia(ImageOnlyTransform):
    """
  Apply a sepia filter to the input image.

This transform converts a color image to a sepia tone, giving it a warm, brownish tint
that is reminiscent of old photographs. The sepia effect is achieved by applying a
specific color transformation matrix to the RGB channels of the input image.
For grayscale images, the transform is a no-op and returns the original image.

Args:
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    1,3

Note:
    - The sepia effect only works with RGB images (3 channels). For grayscale images,
      the original image is returned unchanged since the sepia transformation would
      have no visible effect when R=G=B.
    - The sepia effect is created using a fixed color transformation matrix:
      [[0.393, 0.769, 0.189],
       [0.349, 0.686, 0.168],
       [0.272, 0.534, 0.131]]
    - The output image will have the same data type as the input image.
    - For float32 images, ensure the input values are in the range [0, 1].

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>>
    >>> # Apply sepia effect to a uint8 RGB image
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.ToSepia(p=1.0)
    >>> sepia_image = transform(image=image)['image']
    >>> assert sepia_image.shape == image.shape
    >>> assert sepia_image.dtype == np.uint8
    >>>
    >>> # Apply sepia effect to a float32 RGB image
    >>> image = np.random.rand(100, 100, 3).astype(np.float32)
    >>> transform = A.ToSepia(p=1.0)
    >>> sepia_image = transform(image=image)['image']
    >>> assert sepia_image.shape == image.shape
    >>> assert sepia_image.dtype == np.float32
    >>> assert 0 <= sepia_image.min() <= sepia_image.max() <= 1.0
    >>>
    >>> # No effect on grayscale images
    >>> gray_image = np.random.randint(0, 256, (100, 100), dtype=np.uint8)
    >>> transform = A.ToSepia(p=1.0)
    >>> result = transform(image=gray_image)['image']
    >>> assert np.array_equal(result, gray_image)

Mathematical Formulation:
    Given an input pixel [R, G, B], the sepia tone is calculated as:
    R_sepia = 0.393*R + 0.769*G + 0.189*B
    G_sepia = 0.349*R + 0.686*G + 0.168*B
    B_sepia = 0.272*R + 0.534*G + 0.131*B
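
    For example, the pixel [R, G, B] = [100, 150, 200] maps to approximately
    [192, 171, 134], shifting the color toward warm brown tones.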

    For grayscale images where R=G=B, this transformation would result in a simple
    scaling of the original value, so we skip it.

    The output values are clipped to the valid range for the image's data type.

See Also:
    ToGray: For converting images to grayscale instead of sepia.

    """

    def __init__(self, p: float = 0.5):
        super().__init__(p=p)
        self.sepia_transformation_matrix = np.array(
            [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]],
        )

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the ToSepia transform to the input image.

Args:
    img (np.ndarray): The input image to apply the ToSepia transform to.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied ToSepia transform.

        """
        if is_grayscale_image(img):
            return img

        if not is_rgb_image(img):
            msg = "ToSepia transformation expects 1 or 3-channel images."
            raise TypeError(msg)
        return fpixel.linear_transformation_rgb(img, self.sepia_transformation_matrix)


class InterpolationPydantic(BaseModel):
    upscale: Literal[
        cv2.INTER_NEAREST,
        cv2.INTER_NEAREST_EXACT,
        cv2.INTER_LINEAR,
        cv2.INTER_CUBIC,
        cv2.INTER_AREA,
        cv2.INTER_LANCZOS4,
        cv2.INTER_LINEAR_EXACT,
    ]
    downscale: Literal[
        cv2.INTER_NEAREST,
        cv2.INTER_NEAREST_EXACT,
        cv2.INTER_LINEAR,
        cv2.INTER_CUBIC,
        cv2.INTER_AREA,
        cv2.INTER_LANCZOS4,
        cv2.INTER_LINEAR_EXACT,
    ]


class Downscale(ImageOnlyTransform):
    """Decrease image quality by downscaling and upscaling back.

This transform simulates the effect of a low-resolution image by first downscaling
the image to a lower resolution and then upscaling it back to its original size.
This process introduces loss of detail and can be used to simulate low-quality
images or to test the robustness of models to different image resolutions.

Args:
    scale_range (tuple[float, float]): Range for the downscaling factor.
        Should be two float values between 0 and 1, where the first value is less than or equal to the second.
        The actual downscaling factor will be randomly chosen from this range for each image.
        Lower values result in more aggressive downscaling.
        Default: (0.25, 0.25)

    interpolation_pair (dict[Literal["downscale", "upscale"], int]): A dictionary specifying
        the interpolation methods to use for downscaling and upscaling.
        Should contain two keys:
        - 'downscale': Interpolation method for downscaling
        - 'upscale': Interpolation method for upscaling
        Values should be OpenCV interpolation flags (e.g., cv2.INTER_NEAREST, cv2.INTER_LINEAR, etc.)
        Default: {'downscale': cv2.INTER_NEAREST, 'upscale': cv2.INTER_NEAREST}

    p (float): Probability of applying the transform. Should be in the range [0, 1].
        Default: 0.5

Targets:
    image, volume

Image types:
    uint8, float32

Note:
    - The actual downscaling factor is randomly chosen for each image from the range
      specified in scale_range.
    - Using different interpolation methods for downscaling and upscaling can produce
      various effects. For example, using INTER_NEAREST for both can create a pixelated look,
      while using INTER_LINEAR or INTER_CUBIC can produce smoother results.
    - This transform can be useful for data augmentation, especially when training models
      that need to be robust to variations in image quality or resolution.

Examples:
    >>> import albumentations as A
    >>> import cv2
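    >>> import numpy as np
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)  # sample input for the example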
    >>> transform = A.Downscale(
    ...     scale_range=(0.5, 0.75),
    ...     interpolation_pair={'downscale': cv2.INTER_NEAREST, 'upscale': cv2.INTER_LINEAR},
    ...     p=0.5
    ... )
    >>> transformed = transform(image=image)
    >>> downscaled_image = transformed['image']

    """

    class InitSchema(BaseTransformInitSchema):
        interpolation_pair: dict[
            Literal["downscale", "upscale"],
            Literal[
                cv2.INTER_NEAREST,
                cv2.INTER_NEAREST_EXACT,
                cv2.INTER_LINEAR,
                cv2.INTER_CUBIC,
                cv2.INTER_AREA,
                cv2.INTER_LANCZOS4,
                cv2.INTER_LINEAR_EXACT,
            ],
        ]
        scale_range: Annotated[
            tuple[float, float],
            AfterValidator(check_range_bounds(0, 1)),
            AfterValidator(nondecreasing),
        ]

    def __init__(
        self,
        scale_range: tuple[float, float] = (0.25, 0.25),
        interpolation_pair: dict[Literal["downscale", "upscale"], int] = {
            "downscale": cv2.INTER_NEAREST,
            "upscale": cv2.INTER_NEAREST,
        },
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.scale_range = scale_range
        self.interpolation_pair = interpolation_pair

    def apply(self, img: np.ndarray, scale: float, **params: Any) -> np.ndarray:
        """Apply the Downscale transform to the input image.

Args:
    img (np.ndarray): The input image to apply the Downscale transform to.
    scale (float): The downscaling factor.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied Downscale transform.

        """
        return fpixel.downscale(
            img,
            scale=scale,
            down_interpolation=self.interpolation_pair["downscale"],
            up_interpolation=self.interpolation_pair["upscale"],
        )

    def get_params(self) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

Returns:
    dict[str, Any]: Dictionary with the following key:
        - "scale" (float): The downscaling factor.

r  )r   r   r  ro   s    rq   r   Downscale.get_params  s$     //1A1ABCCrt   )r  r  )r  r   r  r  r   r   )r   r   r  r   r   r   rw   r   r4  )rx   ry   rz   r{   r   r(   r~   cv2INTER_NEARESTr   r   r   r}   r   r   s   @rq   r5   r5   0  sk    3j
, 
* ,8 ))8I8IJ5(5
5 5 5*
&D Drt   r5   c                     ^  \ rS rSrSr " S S\5      r    S	       S
U 4S jjjr        SS jr      SS jr	Sr
U =r$ )r@   i  a  Apply multiplicative noise to the input image.

This transform multiplies each pixel in the image by a random value or array of values,
effectively creating a noise pattern that scales with the image intensity.

Args:
    multiplier (tuple[float, float]): The range for the random multiplier.
        Defines the range from which the multiplier is sampled.
        Default: (0.9, 1.1)

    per_channel (bool): If True, use a different random multiplier for each channel.
        If False, use the same multiplier for all channels.
        Setting this to False is slightly faster.
        Default: False

    elementwise (bool): If True, generates a unique multiplier for each pixel.
        If False, generates a single multiplier (or one per channel if per_channel=True).
        Default: False

    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - When elementwise=False and per_channel=False, a single multiplier is applied to the entire image.
    - When elementwise=False and per_channel=True, each channel gets a different multiplier.
    - When elementwise=True and per_channel=False, each pixel gets the same multiplier across all channels.
    - When elementwise=True and per_channel=True, each pixel in each channel gets a unique multiplier.
    - Setting per_channel=False is slightly faster, especially for larger images.
    - This transform can be used to simulate various lighting conditions or to create noise that
      scales with image intensity.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.MultiplicativeNoise(multiplier=(0.9, 1.1), per_channel=True, p=1.0)
    >>> result = transform(image=image)
    >>> noisy_image = result["image"]

References:
    Multiplicative noise: https://en.wikipedia.org/wiki/Multiplicative_noise

    """

    class InitSchema(BaseTransformInitSchema):
        multiplier: Annotated[
            tuple[float, float],
            AfterValidator(check_range_bounds(0, None)),
            AfterValidator(nondecreasing),
        ]
        per_channel: bool
        elementwise: bool

    def __init__(
        self,
        multiplier: tuple[float, float] | float = (0.9, 1.1),
        per_channel: bool = False,
        elementwise: bool = False,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.multiplier = cast("tuple[float, float]", multiplier)
        self.elementwise = elementwise
        self.per_channel = per_channel

    def apply(
        self,
        img: np.ndarray,
        multiplier: float | np.ndarray,
        **kwargs: Any,
    ) -> np.ndarray:
        """Apply the MultiplicativeNoise transform to the input image.

Args:
    img (np.ndarray): The input image to apply the MultiplicativeNoise transform to.
    multiplier (float | np.ndarray): The random multiplier.
    **kwargs (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied MultiplicativeNoise transform.

        """
        return multiply(img, multiplier)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        image = data["image"] if "image" in data else data["images"][0]
        num_channels = get_num_channels(image)

        if self.elementwise:
            shape = image.shape if self.per_channel else (*image.shape[:2], 1)
        else:
            shape = (num_channels,) if self.per_channel else (1,)

        multiplier = self.random_generator.uniform(
            self.multiplier[0],
            self.multiplier[1],
            shape,
        ).astype(np.float32)

        if not self.per_channel and num_channels > 1:
            multiplier = np.repeat(multiplier, num_channels, axis=-1)

        if not self.elementwise and self.per_channel:
            multiplier = multiplier.reshape(1, 1, -1)

        if multiplier.shape != image.shape:
            multiplier = multiplier.squeeze()

        return {"multiplier": multiplier}


class FancyPCA(ImageOnlyTransform):
    """Apply Fancy PCA augmentation to the input image.

This augmentation technique applies PCA (Principal Component Analysis) to the image's color channels,
then adds multiples of the principal components to the image, with magnitudes proportional to the
corresponding eigenvalues times a random variable drawn from a Gaussian with mean 0 and standard
deviation 'alpha'.

Args:
    alpha (float): Standard deviation of the Gaussian distribution used to generate
        random noise for each principal component. Default: 0.1.
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    any

Note:
    - This augmentation is particularly effective for RGB images but can work with any number of channels.
    - For grayscale images, it applies a simplified version of the augmentation.
    - The transform preserves the mean of the image while adjusting the color/intensity variation.
    - This implementation is based on the paper by Krizhevsky et al. and is similar to the one used
      in the original AlexNet paper.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.FancyPCA(alpha=0.1, p=1.0)
    >>> result = transform(image=image)
    >>> augmented_image = result["image"]

References:
    ImageNet Classification with Deep Convolutional Neural Networks: In Advances in Neural Information
    Processing Systems (Vol. 25). Curran Associates, Inc.

c                  ,    \ rS rSr% \" SS9rS\S'   Srg)FancyPCA.InitSchemai`  r   r   r   r  ru   N)rx   ry   rz   r{   r   r  r|   r}   ru   rt   rq   r~   r.  `  s    {u"rt   r~   c                ,   > [         TU ]  US9  Xl        g r   )r   r   r  )rp   r  r   r   s      rq   r   FancyPCA.__init__c  s    
 	1
rt   c                .    [         R                  " X5      $ )ab  Apply the FancyPCA transform to the input image.

Args:
    img (np.ndarray): The input image to apply the FancyPCA transform to.
    alpha_vector (np.ndarray): The random noise for each principal component.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied FancyPCA transform.

        """
        return fpixel.fancy_pca(img, alpha_vector)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters dependent on the input data.

        Args:
            params (dict[str, Any]): Parameters from the previous transform.
            data (dict[str, Any]): Input data.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        shape = params["shape"]
        num_channels = shape[-1] if len(shape) == NUM_MULTI_CHANNEL_DIMENSIONS else 1
        alpha_vector = self.random_generator.normal(0, self.alpha, num_channels).astype(np.float32)
        return {"alpha_vector": alpha_vector}


class ColorJitter(ImageOnlyTransform):
    """Randomly changes the brightness, contrast, saturation, and hue of an image.

This transform is similar to torchvision's ColorJitter but with some differences due to the use of OpenCV
instead of Pillow. The main differences are:
1. OpenCV and Pillow use different formulas to convert images to HSV format.
2. This implementation uses value saturation instead of uint8 overflow as in Pillow.

These differences may result in slightly different output compared to torchvision's ColorJitter.

Args:
    brightness (tuple[float, float] | float): How much to jitter brightness.
        If float:
            The brightness factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
        If tuple:
            The brightness factor is sampled from the range specified.
        Should be non-negative numbers.
        Default: (0.8, 1.2)

    contrast (tuple[float, float] | float): How much to jitter contrast.
        If float:
            The contrast factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
        If tuple:
            The contrast factor is sampled from the range specified.
        Should be non-negative numbers.
        Default: (0.8, 1.2)

    saturation (tuple[float, float] | float): How much to jitter saturation.
        If float:
            The saturation factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
        If tuple:
            The saturation factor is sampled from the range specified.
        Should be non-negative numbers.
        Default: (0.8, 1.2)

    hue (float or tuple of float (min, max)): How much to jitter hue.
        If float:
            The hue factor is chosen uniformly from [-hue, hue]. Should have 0 <= hue <= 0.5.
        If tuple:
            The hue factor is sampled from the range specified. Values should be in range [-0.5, 0.5].
        Default: (-0.5, 0.5)

     p (float): Probability of applying the transform. Should be in the range [0, 1].
        Default: 0.5


Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    1, 3

Note:
    - The order of application for these color transformations is random for each image.
    - The ranges for brightness, contrast, and saturation are applied as multiplicative factors.
    - The range for hue is applied as an additive factor.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, p=1.0)
    >>> result = transform(image=image)
    >>> jittered_image = result['image']
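    >>>
    >>> # Shape and dtype are preserved; only pixel values change (illustrative)
    >>> assert jittered_image.shape == image.shape
    >>> assert jittered_image.dtype == image.dtype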

References:
    - ColorJitter: https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html
    - Color Conversions: https://docs.opencv.org/3.4/de/d25/imgproc_color_conversions.html

    """

    class InitSchema(BaseTransformInitSchema):
        brightness: tuple[float, float] | float
        contrast: tuple[float, float] | float
        saturation: tuple[float, float] | float
        hue: tuple[float, float] | float

        @field_validator("brightness", "contrast", "saturation", "hue")
        @classmethod
        def _check_ranges(
            cls,
            value: tuple[float, float] | float,
            info: ValidationInfo,
        ) -> tuple[float, float]:
            if info.field_name == "hue":
                bounds = -0.5, 0.5
                bias = 0
                clip = False
            elif info.field_name in {"brightness", "contrast", "saturation"}:
                bounds = 0, float("inf")
                bias = 1
                clip = True

            if isinstance(value, numbers.Number):
                if value < 0:
                    raise ValueError(f"If {info.field_name} is a single number, it must be non negative.")
                left = bias - value
                if clip:
                    left = max(left, 0)
                value = (left, bias + value)
            elif isinstance(value, tuple) and len(value) == PAIR:
                check_range(value, *bounds, info.field_name)

            return cast("tuple[float, float]", value)

    def __init__(
        self,
        brightness: tuple[float, float] | float = (0.8, 1.2),
        contrast: tuple[float, float] | float = (0.8, 1.2),
        saturation: tuple[float, float] | float = (0.8, 1.2),
        hue: tuple[float, float] | float = (-0.5, 0.5),
        p: float = 0.5,
    ):
        super().__init__(p=p)

        self.brightness = cast("tuple[float, float]", brightness)
        self.contrast = cast("tuple[float, float]", contrast)
        self.saturation = cast("tuple[float, float]", saturation)
        self.hue = cast("tuple[float, float]", hue)

        self.transforms = [
            fpixel.adjust_brightness_torchvision,
            fpixel.adjust_contrast_torchvision,
            fpixel.adjust_saturation_torchvision,
            fpixel.adjust_hue_torchvision,
        ]

    def get_params(self) -> dict[str, Any]:
        """Generate parameters for the ColorJitter transform.

Returns:
    dict[str, Any]: The parameters of the transform.

        """
        brightness = self.py_random.uniform(*self.brightness)
        contrast = self.py_random.uniform(*self.contrast)
        saturation = self.py_random.uniform(*self.saturation)
        hue = self.py_random.uniform(*self.hue)

        order = [0, 1, 2, 3]
        self.random_generator.shuffle(order)

        return {
            "brightness": brightness,
            "contrast": contrast,
            "saturation": saturation,
            "hue": hue,
            "order": order,
        }

    def apply(
        self,
        img: np.ndarray,
        brightness: float,
        contrast: float,
        saturation: float,
        hue: float,
        order: list[int],
        **params: Any,
    ) -> np.ndarray:
        """Apply the ColorJitter transform to the input image.

Args:
    img (np.ndarray): The input image to apply the ColorJitter transform to.
    brightness (float): The brightness factor.
    contrast (float): The contrast factor.
    saturation (float): The saturation factor.
    hue (float): The hue factor.
    order (list[int]): The order of application for the color transformations.
    **params (Any): Additional parameters (not used in this transform).

Returns:
    np.ndarray: The image with the applied ColorJitter transform.

        """
        if not is_rgb_image(img) and not is_grayscale_image(img):
            msg = "ColorJitter transformation expects 1-channel or 3-channel images."
            raise TypeError(msg)

        color_transforms = [brightness, contrast, saturation, hue]
        for i in order:
            img = self.transforms[i](img, color_transforms[i])

        return img


class Sharpen(ImageOnlyTransform):
    """Sharpen the input image using either kernel-based or Gaussian interpolation method.

Implements two different approaches to image sharpening:
1. Traditional kernel-based method using Laplacian operator
2. Gaussian interpolation method (similar to Kornia's approach)

Args:
    alpha (tuple[float, float]): Range for the visibility of sharpening effect.
        At 0, only the original image is visible, at 1.0 only its processed version is visible.
        Values should be in the range [0, 1].
        Used in both methods. Default: (0.2, 0.5).

    lightness (tuple[float, float]): Range for the lightness of the sharpened image.
        Only used in 'kernel' method. Larger values create higher contrast.
        Values should be greater than 0. Default: (0.5, 1.0).

    method (Literal['kernel', 'gaussian']): Sharpening algorithm to use:
        - 'kernel': Traditional kernel-based sharpening using Laplacian operator
        - 'gaussian': Interpolation between Gaussian blurred and original image
        Default: 'kernel'

    kernel_size (int): Size of the Gaussian blur kernel for 'gaussian' method.
        Must be odd. Default: 5

    sigma (float): Standard deviation for Gaussian kernel in 'gaussian' method.
        Default: 1.0

    p (float): Probability of applying the transform. Default: 0.5.

Image types:
    uint8, float32

Number of channels:
    Any

Mathematical Formulation:
    1. Kernel Method:
       The sharpening operation is based on the Laplacian operator L:
       L = [[-1, -1, -1],
            [-1,  8, -1],
            [-1, -1, -1]]

       The final kernel K is a weighted sum:
       K = (1 - a)I + a(L + λI)

       where:
       - a is the alpha value
       - λ is the lightness value
       - I is the identity kernel

       The output image O is computed as:
       O = K * I  (convolution)

    2. Gaussian Method:
       Based on the unsharp mask principle:
       O = aI + (1-a)G

       where:
       - I is the input image
       - G is the Gaussian blurred version of I
       - a is the alpha value (sharpness)

       The Gaussian kernel G(x,y) is defined as:
       G(x,y) = (1/(2πs²))exp(-(x²+y²)/(2s²))

Note:
    - Kernel sizes must be odd to maintain spatial alignment
    - Methods produce different visual results:
      * Kernel method: More pronounced edges, possible artifacts
      * Gaussian method: More natural look, limited to original sharpness

Examples:
    >>> import albumentations as A
    >>> import numpy as np

    # Traditional kernel sharpening
    >>> transform = A.Sharpen(
    ...     alpha=(0.2, 0.5),
    ...     lightness=(0.5, 1.0),
    ...     method='kernel',
    ...     p=1.0
    ... )

    # Gaussian interpolation sharpening
    >>> transform = A.Sharpen(
    ...     alpha=(0.5, 1.0),
    ...     method='gaussian',
    ...     kernel_size=5,
    ...     sigma=1.0,
    ...     p=1.0
    ... )

References:
    - R. C. Gonzalez and R. E. Woods, "Digital Image Processing (4th Edition),": Chapter 3:
        Intensity Transformations and Spatial Filtering.
    - J. C. Russ, "The Image Processing Handbook (7th Edition),": Chapter 4: Image Enhancement.
    - T. Acharya and A. K. Ray, "Image Processing: Principles and Applications,": Chapter 5: Image Enhancement.
    - Unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking
    - Laplacian operator: https://en.wikipedia.org/wiki/Laplace_operator
    - Gaussian blur: https://en.wikipedia.org/wiki/Gaussian_blur

See Also:
    - Blur: For Gaussian blurring
    - UnsharpMask: Alternative sharpening method
    - RandomBrightnessContrast: For adjusting image contrast

    """

    class InitSchema(BaseTransformInitSchema):
        alpha: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0, 1))]
        lightness: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0, None))]
        method: Literal["kernel", "gaussian"]
        kernel_size: int = Field(ge=3)
        sigma: float = Field(ge=0)

        @field_validator("kernel_size")
        @classmethod
        def _check_kernel_size(cls, value: int) -> int:
            return value + 1 if value % 2 == 0 else value

    def __init__(
        self,
        alpha: tuple[float, float] = (0.2, 0.5),
        lightness: tuple[float, float] = (0.5, 1.0),
        method: Literal["kernel", "gaussian"] = "kernel",
        kernel_size: int = 5,
        sigma: float = 1.0,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.alpha = alpha
        self.lightness = lightness
        self.method = method
        self.kernel_size = kernel_size
        self.sigma = sigma

    @staticmethod
    def __generate_sharpening_matrix(alpha: float, lightness: float) -> np.ndarray:
        matrix_nochange = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
        matrix_effect = np.array(
            [[-1, -1, -1], [-1, 8 + lightness, -1], [-1, -1, -1]],
            dtype=np.float32,
        )
        return (1 - alpha) * matrix_nochange + alpha * matrix_effect

    def get_params(self) -> dict[str, Any]:
        """Generate parameters for the Sharpen transform.
Returns:
    dict[str, Any]: The parameters of the transform.

kernel)r  sharpening_matrixN)r   r   r  r   r^  $_Sharpen__generate_sharpening_matrix)rp   r  r^  s      rq   r   Sharpen.get_params  sn     &&

3;;("..?I%)%F%F&  T::rt   c                    U R                   S:X  a  [        R                  " X5      $ [        R                  " XU R                  U R
                  5      $ )a$  Apply the Sharpen transform to the input image.

Args:
    img (np.ndarray): The input image to apply the Sharpen transform to.
    alpha (float): The alpha value.
    sharpening_matrix (np.ndarray | None): The sharpening matrix.
    **params (Any): Additional parameters for the transform.

ro  )r   r   convolvesharpen_gaussianr`  r  )rp   r   r  rp  r   s        rq   r   Sharpen.apply  s>      ;;("??3::&&s43C3CTZZPPrt   )r  r`  r^  r   r  )rD  r   rw   r   )r  r   )r   r   ro  r(  r   r   )r  r   r^  r   r   r_  r`  r   r  r   r   r   )r  r   r^  r   rw   r   r4  )
r   r   r  r   rp  znp.ndarray | Noner   r   rw   r   )rx   ry   rz   r{   r   r(   r~   r   r+  rb  r   staticmethodrq  r   r   r}   r   r   s   @rq   rR   rR   S  s	   jX#, # ]#6  $6
 &0)308" ' .	
      
E
E
E 

E 
E;*QQ Q -	Q
 Q 
Q Qrt   rR   c                     ^  \ rS rSrSr " S S\5      r   S
     SU 4S jjjr\      SS j5       r	SS jr
        SS jrS	rU =r$ )r6   i  a  Apply embossing effect to the input image.

This transform creates an emboss effect by highlighting edges and creating a 3D-like texture
in the image. It works by applying a specific convolution kernel to the image that emphasizes
differences in adjacent pixel values.

Args:
    alpha (tuple[float, float]): Range to choose the visibility of the embossed image.
        At 0, only the original image is visible, at 1.0 only its embossed version is visible.
        Values should be in the range [0, 1].
        Alpha will be randomly selected from this range for each image.
        Default: (0.2, 0.5)

    strength (tuple[float, float]): Range to choose the strength of the embossing effect.
        Higher values create a more pronounced 3D effect.
        Values should be non-negative.
        Strength will be randomly selected from this range for each image.
        Default: (0.2, 0.7)

    p (float): Probability of applying the transform. Should be in the range [0, 1].
        Default: 0.5

Targets:
    image, volume

Image types:
    uint8, float32

Note:
    - The emboss effect is created using a 3x3 convolution kernel.
    - The 'alpha' parameter controls the blend between the original image and the embossed version.
      A higher alpha value will result in a more pronounced emboss effect.
    - The 'strength' parameter affects the intensity of the embossing. Higher strength values
      will create more contrast in the embossed areas, resulting in a stronger 3D-like effect.
    - This transform can be useful for creating artistic effects or for data augmentation
      in tasks where edge information is important.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.Emboss(alpha=(0.2, 0.5), strength=(0.2, 0.7), p=0.5)
    >>> result = transform(image=image)
    >>> embossed_image = result['image']

References:
    - Image Embossing: https://en.wikipedia.org/wiki/Image_embossing
    - Application of Emboss Filtering in Image Processing: https://www.researchgate.net/publication/303412455_Application_of_Emboss_Filtering_in_Image_Processing

    """

    class InitSchema(BaseTransformInitSchema):
        alpha: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0, 1))]
        strength: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0, None))]

    def __init__(
        self,
        alpha: tuple[float, float] = (0.2, 0.5),
        strength: tuple[float, float] = (0.2, 0.7),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.alpha = alpha
        self.strength = strength

    @staticmethod
    def __generate_emboss_matrix(alpha_sample: float, strength_sample: float) -> np.ndarray:
        matrix_nochange = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
        matrix_effect = np.array(
            [
                [-1 - strength_sample, 0 - strength_sample, 0],
                [0 - strength_sample, 1, 0 + strength_sample],
                [0, 0 + strength_sample, 1 + strength_sample],
            ],
            dtype=np.float32,
        )
        return (1 - alpha_sample) * matrix_nochange + alpha_sample * matrix_effect

    def get_params(self) -> dict[str, np.ndarray]:
        """Generate parameters for the Emboss transform.
Returns:
    dict[str, np.ndarray]: The parameters of the transform.

)r  r  emboss_matrix)r   r   r  r|  _Emboss__generate_emboss_matrix)rp   r  r|  r  s       rq   r   Emboss.get_paramse  s\     &&

3>>))4==955$ 6 
  //rt   c                .    [         R                  " X5      $ )zApply the Emboss transform to the input image.

Args:
    img (np.ndarray): The input image to apply the Emboss transform to.
    emboss_matrix (np.ndarray): The emboss matrix.
        **params (Any): Additional parameters for the transform.

        """
        return fpixel.convolve(img, emboss_matrix)


class Superpixels(ImageOnlyTransform):
    """Transform images partially/completely to their superpixel representation.

Args:
    p_replace (tuple[float, float] | float): Defines for any segment the probability that the pixels within that
        segment are replaced by their average color (otherwise, the pixels are not changed).


        * A probability of ``0.0`` would mean, that the pixels in no
            segment are replaced by their average color (image is not
            changed at all).
        * A probability of ``0.5`` would mean, that around half of all
            segments are replaced by their average color.
        * A probability of ``1.0`` would mean, that all segments are
            replaced by their average color (resulting in a voronoi
            image).

        Behavior based on chosen data types for this parameter:
        * If a ``float``, then that ``float`` will always be used.
        * If ``tuple`` ``(a, b)``, then a random probability will be
        sampled from the interval ``[a, b]`` per image.
        Default: (0.1, 0.3)

    n_segments (tuple[int, int] | int): Rough target number of how many superpixels to generate.
        The algorithm may deviate from this number.
        Lower value will lead to coarser superpixels.
        Higher values are computationally more intensive and will hence lead to a slowdown.
        If tuple ``(a, b)``, then a value from the discrete interval ``[a..b]`` will be sampled per image.
        Default: (15, 120)

    max_size (int | None): Maximum image size at which the augmentation is performed.
        If the width or height of an image exceeds this value, it will be
        downscaled before the augmentation so that the longest side matches `max_size`.
        This is done to speed up the process. The final output image has the same size as the input image.
        Note that in case `p_replace` is below ``1.0``,
        the down-/upscaling will affect the not-replaced pixels too.
        Use ``None`` to apply no down-/upscaling.
        Default: 128

    interpolation (OpenCV flag): Flag that is used to specify the interpolation algorithm. Should be one of:
        cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
        Default: cv2.INTER_LINEAR.

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - This transform can significantly change the visual appearance of the image.
    - The transform makes use of a superpixel algorithm, which tends to be slow.
      If performance is a concern, consider using `max_size` to limit the image size.
    - The effect of this transform can vary greatly depending on the `p_replace` and `n_segments` parameters.
    - When `p_replace` is high, the image can become highly abstracted, resembling a voronoi diagram.
    - The transform preserves the original image type (uint8 or float32).

Mathematical Formulation:
    1. The image is segmented into approximately `n_segments` superpixels using the SLIC algorithm.
    2. For each superpixel:
    - With probability `p_replace`, all pixels in the superpixel are replaced with their mean color.
    - With probability `1 - p_replace`, the superpixel is left unchanged.
    3. If the image was resized due to `max_size`, it is resized back to its original dimensions.
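
    A rough sketch of steps 1-2 using scikit-image's SLIC (an assumed dependency here,
    purely illustrative; the transform uses its own internal implementation):

    >>> import numpy as np
    >>> from skimage.segmentation import slic
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> segments = slic(image, n_segments=50, start_label=0)
    >>> out = image.copy()
    >>> for seg_id in np.unique(segments):
    ...     if np.random.random() < 0.5:  # plays the role of p_replace
    ...         out[segments == seg_id] = image[segments == seg_id].mean(axis=0)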

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)

    # Apply superpixels with default parameters
    >>> transform = A.Superpixels(p=1.0)
    >>> augmented_image = transform(image=image)['image']

    # Apply superpixels with custom parameters
    >>> transform = A.Superpixels(
    ...     p_replace=(0.5, 0.7),
    ...     n_segments=(50, 100),
    ...     max_size=None,
    ...     interpolation=cv2.INTER_NEAREST,
    ...     p=1.0
    ... )
    >>> augmented_image = transform(image=image)['image']

    class InitSchema(BaseTransformInitSchema):
        p_replace: ZeroOneRangeType
        n_segments: OnePlusIntRangeType
        max_size: int | None = Field(ge=1)
        interpolation: Literal[
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_CUBIC,
            cv2.INTER_AREA,
            cv2.INTER_LANCZOS4,
        ]

    def __init__(
        self,
        p_replace: tuple[float, float] | float = (0.1, 0.3),
        n_segments: tuple[int, int] | int = (15, 120),
        max_size: int | None = 128,
        interpolation: int = cv2.INTER_LINEAR,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.p_replace = cast("tuple[float, float]", p_replace)
        self.n_segments = cast("tuple[int, int]", n_segments)
        self.max_size = max_size
        self.interpolation = interpolation

    def get_params(self) -> dict[str, Any]:
        """Generate parameters for the Superpixels transform.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        n_segments = self.py_random.randint(*self.n_segments)
        p = self.py_random.uniform(*self.p_replace)
        return {
            "replace_samples": self.random_generator.random(n_segments) < p,
            "n_segments": n_segments,
        }

    def apply(
        self,
        img: np.ndarray,
        replace_samples: Sequence[bool],
        n_segments: int,
        **kwargs: Any,
    ) -> np.ndarray:
        """Apply the Superpixels transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the Superpixels transform to.
            replace_samples (Sequence[bool]): Whether to replace pixels in segments.
            n_segments (int): Number of superpixels.
            **kwargs (Any): Additional parameters (not used in this transform).

        Returns:
            np.ndarray: The image with the applied Superpixels transform.

        """
        return fpixel.superpixels(
            img,
            n_segments,
            replace_samples,
            self.max_size,
            self.interpolation,
        )


class RingingOvershoot(ImageOnlyTransform):
    """Create ringing or overshoot artifacts by convolving the image with a 2D sinc filter.

This transform simulates the ringing artifacts that can occur in digital image processing,
particularly after sharpening or edge enhancement operations. It creates oscillations
or overshoots near sharp transitions in the image.

Args:
    blur_limit (tuple[int, int] | int): Maximum kernel size for the sinc filter.
        Must be an odd number in the range [3, inf).
        If a single int is provided, the kernel size will be randomly chosen
        from the range (3, blur_limit). If a tuple (min, max) is provided,
        the kernel size will be randomly chosen from the range (min, max).
        Default: (7, 15).
    cutoff (tuple[float, float]): Range to choose the cutoff frequency in radians.
        Values should be in the range (0, π). A lower cutoff frequency will
        result in more pronounced ringing effects.
        Default: (π/4, π/2).
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Number of channels:
    Any

Note:
    - Ringing artifacts are oscillations of the image intensity function in the neighborhood
      of sharp transitions, such as edges or object boundaries.
    - This transform uses a 2D sinc filter (also known as a 2D cardinal sine function)
      to introduce these artifacts.
    - The severity of the ringing effect is controlled by both the kernel size (blur_limit)
      and the cutoff frequency.
    - Larger kernel sizes and lower cutoff frequencies will generally produce more
      noticeable ringing effects.
    - This transform can be useful for:
      * Simulating imperfections in image processing or transmission systems
      * Testing the robustness of computer vision models to ringing artifacts
      * Creating artistic effects that emphasize edges and transitions in images

Mathematical Formulation:
    The 2D sinc filter kernel is defined as:

    K(x, y) = cutoff * J₁(cutoff * √(x² + y²)) / (2π * √(x² + y²))

    where:
    - J₁ is the Bessel function of the first kind of order 1
    - cutoff is the chosen cutoff frequency
    - x and y are the distances from the kernel center

    The filtered image I' is obtained by convolving the input image I with the kernel K:

    I'(x, y) = ∑∑ I(x-u, y-v) * K(u, v)

    The convolution operation introduces the ringing artifacts near sharp transitions.
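
    A direct NumPy/SciPy sketch of this kernel construction (example `ksize` and
    `cutoff` values, illustrative only; the center cell is filled with the analytic
    limit at r -> 0 and the kernel is normalized to preserve brightness):

    >>> import numpy as np
    >>> from scipy import special
    >>> ksize, cutoff = 9, np.pi / 4
    >>> ax = np.arange(ksize) - (ksize - 1) / 2
    >>> r = np.sqrt(ax[:, None] ** 2 + ax[None, :] ** 2)
    >>> with np.errstate(divide="ignore", invalid="ignore"):
    ...     kernel = cutoff * special.j1(cutoff * r) / (2 * np.pi * r)
    >>> kernel[(ksize - 1) // 2, (ksize - 1) // 2] = cutoff**2 / (4 * np.pi)
    >>> kernel /= kernel.sum()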

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)

    # Apply ringing effect with default parameters
    >>> transform = A.RingingOvershoot(p=1.0)
    >>> ringing_image = transform(image=image)['image']

    # Apply ringing effect with custom parameters
    >>> transform = A.RingingOvershoot(
    ...     blur_limit=(9, 17),
    ...     cutoff=(np.pi/6, np.pi/3),
    ...     p=1.0
    ... )
    >>> ringing_image = transform(image=image)['image']

References:
    - Ringing artifacts: https://en.wikipedia.org/wiki/Ringing_artifacts
    - Sinc filter: https://en.wikipedia.org/wiki/Sinc_filter
    - Digital Image Processing: Rafael C. Gonzalez and Richard E. Woods, 4th Edition

    class InitSchema(BlurInitSchema):
        blur_limit: tuple[int, int] | int
        cutoff: Annotated[
            tuple[float, float],
            AfterValidator(check_range_bounds(0, np.pi)),
            AfterValidator(nondecreasing),
        ]

    def __init__(
        self,
        blur_limit: tuple[int, int] | int = (7, 15),
        cutoff: tuple[float, float] = (np.pi / 4, np.pi / 2),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.blur_limit = cast("tuple[int, int]", blur_limit)
        self.cutoff = cutoff

    def get_params(self) -> dict[str, np.ndarray]:
        """Generate parameters for the RingingOvershoot transform.

        Returns:
            dict[str, np.ndarray]: The parameters of the transform.

        """
        ksize = self.py_random.randrange(self.blur_limit[0], self.blur_limit[1] + 1, 2)
        if ksize % 2 == 0:
            ksize += 1

        cutoff = self.py_random.uniform(*self.cutoff)

        # Build the 2D circularly symmetric sinc (low-pass) kernel
        with np.errstate(divide="ignore", invalid="ignore"):
            kernel = np.fromfunction(
                lambda x, y: cutoff
                * special.j1(cutoff * np.sqrt((x - (ksize - 1) / 2) ** 2 + (y - (ksize - 1) / 2) ** 2))
                / (2 * np.pi * np.sqrt((x - (ksize - 1) / 2) ** 2 + (y - (ksize - 1) / 2) ** 2)),
                [ksize, ksize],
            )
        # Fill the center value with the analytic limit at r -> 0
        kernel[(ksize - 1) // 2, (ksize - 1) // 2] = cutoff**2 / (4 * np.pi)

        # Normalize the kernel so overall brightness is preserved
        kernel = kernel.astype(np.float32) / np.sum(kernel)

        return {"kernel": kernel}

    def apply(self, img: np.ndarray, kernel: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the RingingOvershoot transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the RingingOvershoot transform to.
            kernel (np.ndarray): The kernel for the convolution.
            **params (Any): Additional parameters (not used in this transform).

        """
        return fpixel.convolve(img, kernel)


class UnsharpMask(ImageOnlyTransform):
    """Sharpen the input image using unsharp masking and blend the result with the original image.

Unsharp masking is a technique that enhances edge contrast in an image, creating the illusion of increased
sharpness.
This transform applies Gaussian blur to create a blurred version of the image, then uses this to create a mask
which is combined with the original image to enhance edges and fine details.

Args:
    blur_limit (tuple[int, int] | int): maximum Gaussian kernel size for blurring the input image.
        Must be zero or odd and in range [0, inf). If set to 0 it will be computed from sigma
        as `round(sigma * (3 if img.dtype == np.uint8 else 4) * 2 + 1) + 1`.
        If set single value `blur_limit` will be in range (0, blur_limit).
        Default: (3, 7).
    sigma_limit (tuple[float, float] | float): Gaussian kernel standard deviation. Must be greater than or equal to 0.
        If set single value `sigma_limit` will be in range (0, sigma_limit).
        If set to 0 sigma will be computed as `sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8`. Default: 0.
    alpha (tuple[float, float]): range to choose the visibility of the sharpened image.
        At 0, only the original image is visible, at 1.0 only its sharpened version is visible.
        Default: (0.2, 0.5).
    threshold (int): Value to limit sharpening only for areas with high pixel difference between original image
        and its smoothed version. Higher threshold means less sharpening on flat areas.
        Must be in range [0, 255]. Default: 10.
    p (float): probability of applying the transform. Default: 0.5.

Targets:
    image, volume

Image types:
    uint8, float32

Note:
    - The algorithm creates a mask M = (I - G) * alpha, where I is the original image and G is the Gaussian
        blurred version.
    - The final image is computed as: output = I + M if |I - G| > threshold, else I.
    - Higher alpha values increase the strength of the sharpening effect.
    - Higher threshold values limit the sharpening effect to areas with more significant edges or details.
    - The blur_limit and sigma_limit parameters control the Gaussian blur used to create the mask.
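
    A minimal float32 sketch of this masking rule (example values; one plausible
    reading of the threshold test, while the transform itself handles dtypes,
    kernel sampling, and clipping internally):

    >>> import cv2
    >>> import numpy as np
    >>> img = np.random.rand(64, 64, 3).astype(np.float32)
    >>> alpha, threshold = 0.5, 10 / 255
    >>> blurred = cv2.GaussianBlur(img, (5, 5), sigmaX=1.0)
    >>> mask = (img - blurred) * alpha
    >>> diff = np.abs(img - blurred).max(axis=-1, keepdims=True)
    >>> sharpened = np.clip(np.where(diff > threshold, img + mask, img), 0, 1)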

References:
    Unsharp Masking: https://en.wikipedia.org/wiki/Unsharp_masking

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>>
    # Apply UnsharpMask with default parameters
    >>> transform = A.UnsharpMask(p=1.0)
    >>> sharpened_image = transform(image=image)['image']
    >>>
    # Apply UnsharpMask with custom parameters
    >>> transform = A.UnsharpMask(
    ...     blur_limit=(3, 7),
    ...     sigma_limit=(0.1, 0.5),
    ...     alpha=(0.2, 0.7),
    ...     threshold=15,
    ...     p=1.0
    ... )
    >>> sharpened_image = transform(image=image)['image']

    class InitSchema(BaseTransformInitSchema):
        sigma_limit: NonNegativeFloatRangeType
        alpha: ZeroOneRangeType
        threshold: int = Field(ge=0, le=255)
        blur_limit: tuple[int, int] | int

        @field_validator("blur_limit")
        @classmethod
        def _process_blur(
            cls,
            value: tuple[int, int] | int,
            info: ValidationInfo,
        ) -> tuple[int, int]:
            return fblur.process_blur_limit(value, info, min_value=3)

    def __init__(
        self,
        blur_limit: tuple[int, int] | int = (3, 7),
        sigma_limit: tuple[float, float] | float = 0.0,
        alpha: tuple[float, float] | float = (0.2, 0.5),
        threshold: int = 10,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.blur_limit = cast("tuple[int, int]", blur_limit)
        self.sigma_limit = cast("tuple[float, float]", sigma_limit)
        self.alpha = cast("tuple[float, float]", alpha)
        self.threshold = threshold

    def get_params(self) -> dict[str, Any]:
        """Generate parameters for the UnsharpMask transform.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        return {
            "ksize": self.py_random.randrange(self.blur_limit[0], self.blur_limit[1] + 1, 2),
            "sigma": self.py_random.uniform(*self.sigma_limit),
            "alpha": self.py_random.uniform(*self.alpha),
        }

    def apply(
        self,
        img: np.ndarray,
        ksize: int,
        sigma: int,
        alpha: float,
        **params: Any,
    ) -> np.ndarray:
        """Apply the UnsharpMask transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the UnsharpMask transform to.
            ksize (int): The kernel size for the convolution.
            sigma (int): The standard deviation for the Gaussian blur.
            alpha (float): The visibility of the sharpened image.
            **params (Any): Additional parameters (not used in this transform).

        Returns:
            np.ndarray: The image with the applied UnsharpMask transform.

        """
        return fpixel.unsharp_mask(
            img,
            ksize,
            sigma=sigma,
            alpha=alpha,
            threshold=self.threshold,
        )


class Spatter(ImageOnlyTransform):
    """Apply spatter transform. It simulates corruption which can occlude a lens in the form of rain or mud.

Args:
    mean (tuple[float, float] | float): Mean value of normal distribution for generating liquid layer.
        If single float mean will be sampled from `(0, mean)`
        If tuple of float mean will be sampled from range `(mean[0], mean[1])`.
        If you want constant value use (mean, mean).
        Default (0.65, 0.65)
    std (tuple[float, float] | float): Standard deviation value of normal distribution for generating liquid layer.
        If single float the number will be sampled from `(0, std)`.
        If tuple of float std will be sampled from range `(std[0], std[1])`.
        If you want constant value use (std, std).
        Default: (0.3, 0.3).
    gauss_sigma (tuple[float, float] | float): Sigma value for gaussian filtering of liquid layer.
        If single float the number will be sampled from `(0, gauss_sigma)`.
        If tuple of float gauss_sigma will be sampled from range `(gauss_sigma[0], gauss_sigma[1])`.
        If you want constant value use (gauss_sigma, gauss_sigma).
        Default: (2, 3).
    cutout_threshold (tuple[float, float] | float): Threshold for filtering liquid layer
        (determines the number of drops).
        If single float the number will be sampled from `(0, cutout_threshold)`.
        If tuple of float cutout_threshold will be sampled from range `(cutout_threshold[0], cutout_threshold[1])`.
        If you want constant value use `(cutout_threshold, cutout_threshold)`.
        Default: (0.68, 0.68).
    intensity (tuple[float, float] | float): Intensity of corruption.
        If single float the number will be sampled from `(0, intensity)`.
        If tuple of float intensity will be sampled from range `(intensity[0], intensity[1])`.
        If you want constant value use `(intensity, intensity)`.
        Default: (0.6, 0.6).
    mode (Literal["rain", "mud"]): Type of corruption. Default: "rain".
    color (tuple[int, ...] | None): Corruption elements color.
        If a tuple is provided, its values are used as the color of the effect.
        If None uses default colors based on mode (rain: (238, 238, 175), mud: (20, 42, 63)).
    p (float): probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

References:
    Benchmarking Neural Network Robustness to Common Corruptions and Perturbations: https://arxiv.org/abs/1903.12261

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create a sample image
    >>> image = np.ones((300, 300, 3), dtype=np.uint8) * 200  # Light gray background
    >>> # Add some gradient to make effects more visible
    >>> for i in range(300):
    ...     image[i, :, :] = np.clip(image[i, :, :] - i // 3, 0, 255)
    >>>
    >>> # Example 1: Rain effect with default parameters
    >>> rain_transform = A.Spatter(
    ...     mode="rain",
    ...     p=1.0
    ... )
    >>> rain_result = rain_transform(image=image)
    >>> rain_image = rain_result['image']  # Image with rain drops
    >>>
    >>> # Example 2: Heavy rain with custom parameters
    >>> heavy_rain = A.Spatter(
    ...     mode="rain",
    ...     mean=(0.7, 0.7),             # Higher mean = more coverage
    ...     std=(0.2, 0.2),              # Lower std = more uniform effect
    ...     cutout_threshold=(0.65, 0.65),  # Lower threshold = more drops
    ...     intensity=(0.8, 0.8),        # Higher intensity = more visible effect
    ...     color=(200, 200, 255),       # Blueish rain drops
    ...     p=1.0
    ... )
    >>> heavy_rain_result = heavy_rain(image=image)
    >>> heavy_rain_image = heavy_rain_result['image']
    >>>
    >>> # Example 3: Mud effect
    >>> mud_transform = A.Spatter(
    ...     mode="mud",
    ...     mean=(0.6, 0.6),
    ...     std=(0.3, 0.3),
    ...     cutout_threshold=(0.62, 0.62),
    ...     intensity=(0.7, 0.7),
    ...     p=1.0
    ... )
    >>> mud_result = mud_transform(image=image)
    >>> mud_image = mud_result['image']  # Image with mud splatters
    >>>
    >>> # Example 4: Custom colored mud
    >>> red_mud = A.Spatter(
    ...     mode="mud",
    ...     mean=(0.55, 0.55),
    ...     std=(0.25, 0.25),
    ...     cutout_threshold=(0.7, 0.7),
    ...     intensity=(0.6, 0.6),
    ...     color=(120, 40, 40),  # Reddish-brown mud
    ...     p=1.0
    ... )
    >>> red_mud_result = red_mud(image=image)
    >>> red_mud_image = red_mud_result['image']
    >>>
    >>> # Example 5: Random effect (50% chance of applying)
    >>> random_spatter = A.Compose([
    ...     A.Spatter(
    ...         mode="rain" if np.random.random() < 0.5 else "mud",
    ...         p=0.5
    ...     )
    ... ])
    >>> random_result = random_spatter(image=image)
    >>> result_image = random_result['image']  # May or may not have spatter effect

    class InitSchema(BaseTransformInitSchema):
        mean: ZeroOneRangeType
        std: ZeroOneRangeType
        gauss_sigma: NonNegativeFloatRangeType
        cutout_threshold: ZeroOneRangeType
        intensity: ZeroOneRangeType
        mode: Literal["rain", "mud"]
        color: Sequence[int] | None

        @model_validator(mode="after")
        def _check_color(self) -> Self:
            default_colors = {"rain": [238, 238, 175], "mud": [20, 42, 63]}

            if self.color is None:
                self.color = default_colors[self.mode]
            elif len(self.color) != NUM_RGB_CHANNELS:
                msg = "Color must be a list of three integers for RGB format."
                raise ValueError(msg)
            return self

    def __init__(
        self,
        mean: tuple[float, float] | float = (0.65, 0.65),
        std: tuple[float, float] | float = (0.3, 0.3),
        gauss_sigma: tuple[float, float] | float = (2, 3),
        cutout_threshold: tuple[float, float] | float = (0.68, 0.68),
        intensity: tuple[float, float] | float = (0.6, 0.6),
        mode: Literal["rain", "mud"] = "rain",
        color: tuple[int, ...] | None = None,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.mean = cast("tuple[float, float]", mean)
        self.std = cast("tuple[float, float]", std)
        self.gauss_sigma = cast("tuple[float, float]", gauss_sigma)
        self.cutout_threshold = cast("tuple[float, float]", cutout_threshold)
        self.intensity = cast("tuple[float, float]", intensity)
        self.mode = mode
        self.color = cast("tuple[int, ...]", color)

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the Spatter transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the Spatter transform to.
            **params (dict[str, Any]): Additional parameters (not used in this transform).

        Returns:
            np.ndarray: The image with the applied Spatter transform.

        """
        non_rgb_error(img)
        if params["mode"] == "rain":
            return fpixel.spatter_rain(img, params["drops"])
        return fpixel.spatter_mud(img, params["non_mud"], params["mud"])

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the Spatter transform.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        height, width = params["shape"][:2]

        mean = self.py_random.uniform(*self.mean)
        std = self.py_random.uniform(*self.std)
        cutout_threshold = self.py_random.uniform(*self.cutout_threshold)
        sigma = self.py_random.uniform(*self.gauss_sigma)
        mode = self.mode
        intensity = self.py_random.uniform(*self.intensity)
        color = np.array(self.color) / 255.0

        liquid_layer = self.random_generator.normal(
            size=(height, width),
            loc=mean,
            scale=std,
        )
        # Blur the liquid layer; kernel size follows the 3-sigma rule, forced odd
        ksize = int(2 * round(3 * sigma) + 1)
        cv2.GaussianBlur(
            src=liquid_layer,
            dst=liquid_layer,
            ksize=(ksize, ksize),
            sigmaX=sigma,
            sigmaY=sigma,
            borderType=cv2.BORDER_REPLICATE,
        )
        # Threshold the layer: this determines where the drops appear
        liquid_layer[liquid_layer < cutout_threshold] = 0

        if mode == "rain":
            return {
                "mode": "rain",
                **fpixel.get_rain_params(liquid_layer=liquid_layer, color=color, intensity=intensity),
            }

        return {
            "mode": "mud",
            **fpixel.get_mud_params(
                liquid_layer=liquid_layer,
                color=color,
                cutout_threshold=cutout_threshold,
                sigma=sigma,
                intensity=intensity,
                random_generator=self.random_generator,
            ),
        }


class ChromaticAberration(ImageOnlyTransform):
    """Add lateral chromatic aberration by distorting the red and blue channels of the input image.

Chromatic aberration is an optical effect that occurs when a lens fails to focus all colors to the same point.
This transform simulates this effect by applying different radial distortions to the red and blue channels
of the image, while leaving the green channel unchanged.

Args:
    primary_distortion_limit (tuple[float, float] | float): Range of the primary radial distortion coefficient.
        If a single float value is provided, the range
        will be (-primary_distortion_limit, primary_distortion_limit).
        This parameter controls the distortion in the center of the image:
        - Positive values result in pincushion distortion (edges bend inward)
        - Negative values result in barrel distortion (edges bend outward)
        Default: (-0.02, 0.02).

    secondary_distortion_limit (tuple[float, float] | float): Range of the secondary radial distortion coefficient.
        If a single float value is provided, the range
        will be (-secondary_distortion_limit, secondary_distortion_limit).
        This parameter controls the distortion in the corners of the image:
        - Positive values enhance pincushion distortion
        - Negative values enhance barrel distortion
        Default: (-0.05, 0.05).

    mode (Literal["green_purple", "red_blue", "random"]): Type of color fringing to apply. Options are:
        - 'green_purple': Distorts red and blue channels in opposite directions, creating green-purple fringing.
        - 'red_blue': Distorts red and blue channels in the same direction, creating red-blue fringing.
        - 'random': Randomly chooses between 'green_purple' and 'red_blue' modes for each application.
        Default: 'green_purple'.

    interpolation (InterpolationType): Flag specifying the interpolation algorithm. Should be one of:
        cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
        Default: cv2.INTER_LINEAR.

    p (float): Probability of applying the transform. Should be in the range [0, 1].
        Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - This transform only affects RGB images. Grayscale images will raise an error.
    - The strength of the effect depends on both primary and secondary distortion limits.
    - Higher absolute values for distortion limits will result in more pronounced chromatic aberration.
    - The 'green_purple' mode tends to produce more noticeable effects than 'red_blue'.

Examples:
    >>> import albumentations as A
    >>> import cv2
    >>> import numpy as np
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.ChromaticAberration(
    ...     primary_distortion_limit=0.05,
    ...     secondary_distortion_limit=0.1,
    ...     mode='green_purple',
    ...     interpolation=cv2.INTER_LINEAR,
    ...     p=1.0
    ... )
    >>> transformed = transform(image=image)
    >>> aberrated_image = transformed['image']

References:
    Chromatic Aberration: https://en.wikipedia.org/wiki/Chromatic_aberration

    class InitSchema(BaseTransformInitSchema):
        primary_distortion_limit: SymmetricRangeType
        secondary_distortion_limit: SymmetricRangeType
        mode: Literal["green_purple", "red_blue", "random"]
        interpolation: Literal[
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_CUBIC,
            cv2.INTER_AREA,
            cv2.INTER_LANCZOS4,
        ]

    def __init__(
        self,
        primary_distortion_limit: tuple[float, float] | float = (-0.02, 0.02),
        secondary_distortion_limit: tuple[float, float] | float = (-0.05, 0.05),
        mode: Literal["green_purple", "red_blue", "random"] = "green_purple",
        interpolation: int = cv2.INTER_LINEAR,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.primary_distortion_limit = cast("tuple[float, float]", primary_distortion_limit)
        self.secondary_distortion_limit = cast("tuple[float, float]", secondary_distortion_limit)
        self.mode = mode
        self.interpolation = interpolation

    def apply(
        self,
        img: np.ndarray,
        primary_distortion_red: float,
        secondary_distortion_red: float,
        primary_distortion_blue: float,
        secondary_distortion_blue: float,
        **params: Any,
    ) -> np.ndarray:
        """Apply the ChromaticAberration transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the ChromaticAberration transform to.
            primary_distortion_red (float): The primary distortion coefficient for the red channel.
            secondary_distortion_red (float): The secondary distortion coefficient for the red channel.
            primary_distortion_blue (float): The primary distortion coefficient for the blue channel.
            secondary_distortion_blue (float): The secondary distortion coefficient for the blue channel.
            **params (dict[str, Any]): Additional parameters (not used in this transform).

        Returns:
            np.ndarray: The image with the applied ChromaticAberration transform.

        """
        non_rgb_error(img)
        return fpixel.chromatic_aberration(
            img,
            primary_distortion_red,
            secondary_distortion_red,
            primary_distortion_blue,
            secondary_distortion_blue,
            self.interpolation,
        )

    def get_params(self) -> dict[str, float]:
        """Generate parameters for the ChromaticAberration transform.

        Returns:
            dict[str, float]: The parameters of the transform.

        """
        primary_distortion_red = self.py_random.uniform(*self.primary_distortion_limit)
        secondary_distortion_red = self.py_random.uniform(*self.secondary_distortion_limit)
        primary_distortion_blue = self.py_random.uniform(*self.primary_distortion_limit)
        secondary_distortion_blue = self.py_random.uniform(*self.secondary_distortion_limit)

        secondary_distortion_red = self._match_sign(primary_distortion_red, secondary_distortion_red)
        secondary_distortion_blue = self._match_sign(primary_distortion_blue, secondary_distortion_blue)

        if self.mode == "green_purple":
            # Distortion coefficients of the red and blue channels have the same sign
            primary_distortion_blue = self._match_sign(primary_distortion_red, primary_distortion_blue)
            secondary_distortion_blue = self._match_sign(secondary_distortion_red, secondary_distortion_blue)
        if self.mode == "red_blue":
            # Distortion coefficients of the red and blue channels have the opposite sign
            primary_distortion_blue = self._unmatch_sign(primary_distortion_red, primary_distortion_blue)
            secondary_distortion_blue = self._unmatch_sign(secondary_distortion_red, secondary_distortion_blue)

        return {
            "primary_distortion_red": primary_distortion_red,
            "secondary_distortion_red": secondary_distortion_red,
            "primary_distortion_blue": primary_distortion_blue,
            "secondary_distortion_blue": secondary_distortion_blue,
        }

    @staticmethod
    def _match_sign(a: float, b: float) -> float:
        # Match the sign of b to a
        if (a < 0 < b) or (a > 0 > b):
            return -b
        return b

    @staticmethod
    def _unmatch_sign(a: float, b: float) -> float:
        # Unmatch the sign of b to a
        if not (a < 0 < b) and not (a > 0 > b):
            return -b
        return b


PLANKIAN_JITTER_CONST = {
    "MAX_TEMP": max(
        *fpixel.PLANCKIAN_COEFFS["blackbody"].keys(),
        *fpixel.PLANCKIAN_COEFFS["cied"].keys(),
    ),
    "MIN_BLACKBODY_TEMP": min(fpixel.PLANCKIAN_COEFFS["blackbody"].keys()),
    "MIN_CIED_TEMP": min(fpixel.PLANCKIAN_COEFFS["cied"].keys()),
    "WHITE_TEMP": 6000,
    "SAMPLING_TEMP_PROB": 0.4,
}


class PlanckianJitter(ImageOnlyTransform):
    """Applies Planckian Jitter to the input image, simulating color temperature variations in illumination.

This transform adjusts the color of an image to mimic the effect of different color temperatures
of light sources, based on Planck's law of black body radiation. It can simulate the appearance
of an image under various lighting conditions, from warm (reddish) to cool (bluish) color casts.

PlanckianJitter vs. ColorJitter:
PlanckianJitter is fundamentally different from ColorJitter in its approach and use cases:
1. Physics-based: PlanckianJitter is grounded in the physics of light, simulating real-world
   color temperature changes. ColorJitter applies arbitrary color adjustments.
2. Natural effects: This transform produces color shifts that correspond to natural lighting
   variations, making it ideal for outdoor scene simulation or color constancy problems.
3. Single parameter: Color changes are controlled by a single, physically meaningful parameter
   (color temperature), unlike ColorJitter's multiple abstract parameters.
4. Correlated changes: Color shifts are correlated across channels in a way that mimics natural
   light, whereas ColorJitter can make independent channel adjustments.

When to use PlanckianJitter:
- Simulating different times of day or lighting conditions in outdoor scenes
- Augmenting data for computer vision tasks that need to be robust to natural lighting changes
- Preparing synthetic data to better match real-world lighting variations
- Color constancy research or applications
- When you need physically plausible color variations rather than arbitrary color changes

The logic behind PlanckianJitter:
As the color temperature increases:
1. Lower temperatures (around 3000K) produce warm, reddish tones, simulating sunset or incandescent lighting.
2. Mid-range temperatures (around 5500K) correspond to daylight.
3. Higher temperatures (above 7000K) result in cool, bluish tones, similar to overcast sky or shade.
This progression mimics the natural variation of sunlight throughout the day and in different weather conditions.

Args:
    mode (Literal["blackbody", "cied"]): The mode of the transformation.
        - "blackbody": Simulates blackbody radiation color changes.
        - "cied": Uses the CIE D illuminant series for color temperature simulation.
        Default: "blackbody"

    temperature_limit (tuple[int, int] | None): The range of color temperatures (in Kelvin) to sample from.
        - For "blackbody" mode: Should be within [3000K, 15000K]. Default: (3000, 15000)
        - For "cied" mode: Should be within [4000K, 15000K]. Default: (4000, 15000)
        If None, the default ranges will be used based on the selected mode.
        Higher temperatures produce cooler (bluish) images, lower temperatures produce warmer (reddish) images.

    sampling_method (Literal["uniform", "gaussian"]): Method to sample the temperature.
        - "uniform": Samples uniformly across the specified range.
        - "gaussian": Samples from a Gaussian distribution centered at 6500K (approximate daylight).
        Default: "uniform"

    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image

Image types:
    uint8, float32

Number of channels:
    3

Note:
    - The transform preserves the overall brightness of the image while shifting its color.
    - The "blackbody" mode provides a wider range of color shifts, especially in the lower (warmer) temperatures.
    - The "cied" mode is based on standard illuminants and may provide more realistic daylight variations.
    - The Gaussian sampling method tends to produce more subtle variations, as it's centered around daylight.
    - Unlike ColorJitter, this transform ensures that color changes are physically plausible and correlated
      across channels, maintaining the natural appearance of the scene under different lighting conditions.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
    >>> transform = A.PlanckianJitter(mode="blackbody",
    ...                               temperature_limit=(3000, 9000),
    ...                               sampling_method="uniform",
    ...                               p=1.0)
    >>> result = transform(image=image)
    >>> jittered_image = result["image"]

References:
    - Planck's law: https://en.wikipedia.org/wiki/Planck%27s_law
    - CIE Standard Illuminants: https://en.wikipedia.org/wiki/Standard_illuminant
    - Color temperature: https://en.wikipedia.org/wiki/Color_temperature
    - Implementation inspired by: https://github.com/TheZino/PlanckianJitter

    class InitSchema(BaseTransformInitSchema):
        mode: Literal["blackbody", "cied"]
        temperature_limit: Annotated[tuple[int, int], AfterValidator(nondecreasing)] | None
        sampling_method: Literal["uniform", "gaussian"]

        @model_validator(mode="after")
        def _validate_temperature(self) -> Self:
            max_temp = int(PLANKIAN_JITTER_CONST["MAX_TEMP"])

            if self.temperature_limit is None:
                # The default limit depends on the chosen mode
                self.temperature_limit = (
                    (int(PLANKIAN_JITTER_CONST["MIN_BLACKBODY_TEMP"]), max_temp)
                    if self.mode == "blackbody"
                    else (int(PLANKIAN_JITTER_CONST["MIN_CIED_TEMP"]), max_temp)
                )
            else:
                if self.mode == "blackbody" and (
                    min(self.temperature_limit) < PLANKIAN_JITTER_CONST["MIN_BLACKBODY_TEMP"]
                    or max(self.temperature_limit) > max_temp
                ):
                    raise ValueError("Temperature limits for blackbody should be in [3000, 15000] range")
                if self.mode == "cied" and (
                    min(self.temperature_limit) < PLANKIAN_JITTER_CONST["MIN_CIED_TEMP"]
                    or max(self.temperature_limit) > max_temp
                ):
                    raise ValueError("Temperature limits for CIED should be in [4000, 15000] range")

                if not self.temperature_limit[0] <= PLANKIAN_JITTER_CONST["WHITE_TEMP"] <= self.temperature_limit[1]:
                    raise ValueError("White temperature should be within the temperature limits")

            return self

    def __init__(
        self,
        mode: Literal["blackbody", "cied"] = "blackbody",
        temperature_limit: tuple[int, int] | None = None,
        sampling_method: Literal["uniform", "gaussian"] = "uniform",
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.mode = mode
        self.temperature_limit = cast("tuple[int, int]", temperature_limit)
        self.sampling_method = sampling_method

    def apply(self, img: np.ndarray, temperature: int, **params: Any) -> np.ndarray:
        """Apply the PlanckianJitter transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the PlanckianJitter transform to.
            temperature (int): The temperature to apply to the image.
            **params (Any): Additional parameters for the transform.

        """
        non_rgb_error(img)
        return fpixel.planckian_jitter(img, temperature, mode=self.mode)

    def get_params(self) -> dict[str, Any]:
        """Generate parameters for the PlanckianJitter transform.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        sampling_prob_boundary = PLANKIAN_JITTER_CONST["SAMPLING_TEMP_PROB"]
        sampling_temp_boundary = PLANKIAN_JITTER_CONST["WHITE_TEMP"]

        if self.sampling_method == "uniform":
            # Split into two cases so both sides of the white point are covered
            if self.py_random.random() < sampling_prob_boundary:
                temperature = self.py_random.uniform(
                    self.temperature_limit[0],
                    sampling_temp_boundary,
                )
            else:
                temperature = self.py_random.uniform(
                    sampling_temp_boundary,
                    self.temperature_limit[1],
                )
        elif self.sampling_method == "gaussian":
            # Sample from an asymmetric gaussian centered at the white temperature
            if self.py_random.random() < sampling_prob_boundary:
                # Sample below the white temperature
                shift = np.abs(
                    self.py_random.gauss(
                        0,
                        np.abs(sampling_temp_boundary - self.temperature_limit[0]) / 3,
                    ),
                )
                temperature = sampling_temp_boundary - shift
            else:
                # Sample above the white temperature
                shift = np.abs(
                    self.py_random.gauss(
                        0,
                        np.abs(self.temperature_limit[1] - sampling_temp_boundary) / 3,
                    ),
                )
                temperature = sampling_temp_boundary + shift
        else:
            raise ValueError(f"Unknown sampling method: {self.sampling_method}")

        # Ensure the sampled temperature is within the valid range
        temperature = np.clip(
            temperature,
            self.temperature_limit[0],
            self.temperature_limit[1],
        )

        return {"temperature": int(temperature)}


class ShotNoise(ImageOnlyTransform):
    """Apply shot noise to the image by modeling photon counting as a Poisson process.

Shot noise (also known as Poisson noise) occurs in imaging due to the quantum nature of light.
When photons hit an imaging sensor, they arrive at random times following Poisson statistics.
This transform simulates this physical process in linear light space by:
1. Converting to linear space (removing gamma)
2. Treating each pixel value as an expected photon count
3. Sampling actual photon counts from a Poisson distribution
4. Converting back to display space (reapplying gamma)
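
A rough NumPy sketch of these four steps (gamma 2.2 and the `scale` value mirror the
description above and are example assumptions, not the transform's exact internals):

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> img = np.random.rand(64, 64, 3).astype(np.float32)  # display space, [0, 1]
    >>> scale = 0.2  # reciprocal of expected photons per unit intensity
    >>> linear = img ** 2.2                    # 1. remove gamma
    >>> photons = rng.poisson(linear / scale)  # 2.-3. sample photon counts
    >>> noisy = np.clip(photons * scale, 0.0, 1.0) ** (1 / 2.2)  # 4. reapply gamma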

The noise characteristics follow real camera behavior:
- Noise variance equals signal mean in linear space (Poisson statistics)
- Brighter regions have more absolute noise but less relative noise
- Darker regions have less absolute noise but more relative noise
- Noise is generated independently for each pixel and color channel

Args:
    scale_range (tuple[float, float]): Range for sampling the noise scale factor.
        Represents the reciprocal of the expected photon count per unit intensity.
        Higher values mean more noise:
        - scale = 0.1: ~100 photons per unit intensity (low noise)
        - scale = 1.0: ~1 photon per unit intensity (moderate noise)
        - scale = 10.0: ~0.1 photons per unit intensity (high noise)
        Default: (0.1, 0.3)
    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image

Image types:
    uint8, float32

Note:
    - Performs calculations in linear light space (gamma = 2.2)
    - Preserves the image's mean intensity
    - Memory efficient with in-place operations
    - Thread-safe with independent random seeds

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> # Generate synthetic image
    >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
    >>> # Apply moderate shot noise
    >>> transform = A.ShotNoise(scale_range=(0.1, 1.0), p=1.0)
    >>> noisy_image = transform(image=image)["image"]

References:
    - Shot noise: https://en.wikipedia.org/wiki/Shot_noise
    - Original paper: https://doi.org/10.1002/andp.19183622304 (Schottky, 1918)
    - Poisson process: https://en.wikipedia.org/wiki/Poisson_point_process
    - Gamma correction: https://en.wikipedia.org/wiki/Gamma_correction

    class InitSchema(BaseTransformInitSchema):
        scale_range: Annotated[
            tuple[float, float],
            AfterValidator(nondecreasing),
            AfterValidator(check_range_bounds(0, None)),
        ]

    def __init__(
        self,
        scale_range: tuple[float, float] = (0.1, 0.3),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.scale_range = scale_range

    def apply(
        self,
        img: np.ndarray,
        scale: float,
        random_seed: int,
        **params: Any,
    ) -> np.ndarray:
        """Apply the ShotNoise transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the ShotNoise transform to.
            scale (float): The scale factor for the noise.
            random_seed (int): The random seed for the noise.
            **params (Any): Additional parameters for the transform.

        """
        return fpixel.shot_noise(img, scale, np.random.default_rng(random_seed))

    def get_params(self) -> dict[str, Any]:
        """Generate parameters for the ShotNoise transform.

        Returns:
            dict[str, Any]: The parameters of the transform.

        """
        return {
            "scale": self.py_random.uniform(*self.scale_range),
            "random_seed": self.random_generator.integers(0, 2**32 - 1),
        }


class NoiseParamsBase(BaseModel):
    """Base class for all noise parameter models."""

    model_config = ConfigDict(extra="forbid")
    noise_type: str


class UniformParams(NoiseParamsBase):
    noise_type: Literal["uniform"] = "uniform"
    ranges: list[Sequence[float]] = Field(min_length=1)

    @field_validator("ranges", mode="after")
    @classmethod
    def validate_ranges(cls, v: list[Sequence[float]]) -> list[tuple[float, float]]:
        result = []
        for range_values in v:
            if len(range_values) != PAIR:
                raise ValueError("Each range must have exactly 2 values")
            min_val, max_val = range_values
            if not -1 <= min_val <= max_val <= 1:
                raise ValueError("Range values must be in [-1, 1] and min <= max")
            result.append((float(min_val), float(max_val)))
        return result


class GaussianParams(NoiseParamsBase):
    noise_type: Literal["gaussian"] = "gaussian"
    mean_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=-1, max_val=1))]
    std_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=0, max_val=1))]


class LaplaceParams(NoiseParamsBase):
    noise_type: Literal["laplace"] = "laplace"
    mean_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=-1, max_val=1))]
    scale_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=0, max_val=1))]


class BetaParams(NoiseParamsBase):
    noise_type: Literal["beta"] = "beta"
    alpha_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=0))]
    beta_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=0))]
    scale_range: Annotated[Sequence[float], AfterValidator(check_range_bounds(min_val=0, max_val=1))]


NoiseParams = Annotated[
    Union[UniformParams, GaussianParams, LaplaceParams, BetaParams],
    Field(discriminator="noise_type"),
]


class AdditiveNoise(ImageOnlyTransform):
    """Apply random noise to image channels using various noise distributions.

This transform generates noise using different probability distributions and applies it
to image channels. The noise can be generated in three spatial modes and supports
multiple noise distributions, each with configurable parameters.

Args:
    noise_type (Literal["uniform", "gaussian", "laplace", "beta"]): Type of noise distribution to use. Options:
        - "uniform": Uniform distribution, good for simple random perturbations
        - "gaussian": Normal distribution, models natural random processes
        - "laplace": Similar to Gaussian but with heavier tails, good for outliers
        - "beta": Flexible bounded distribution, can be symmetric or skewed

    spatial_mode (Literal["constant", "per_pixel", "shared"]): How to generate and apply the noise. Options:
        - "constant": One noise value per channel, fastest
        - "per_pixel": Independent noise value for each pixel and channel, slowest
        - "shared": One noise map shared across all channels, medium speed

    approximation (float): Value in [0, 1], default=1.0
        Controls noise generation speed vs quality tradeoff.
        - 1.0: Generate full resolution noise (slowest, highest quality)
        - 0.5: Generate noise at half resolution and upsample
        - 0.25: Generate noise at quarter resolution and upsample
        Only affects 'per_pixel' and 'shared' spatial modes.

    noise_params (dict[str, Any] | None): Parameters for the chosen noise distribution.
        Must match the noise_type:

        uniform:
            ranges: list[tuple[float, float]]
                List of (min, max) ranges for each channel.
                Each range must be in [-1, 1].
                If only one range is provided, it will be used for all channels.

                [(-0.2, 0.2)]  # Same range for all channels
                [(-0.2, 0.2), (-0.1, 0.1), (-0.1, 0.1)]  # Different ranges for RGB

        gaussian:
            mean_range: tuple[float, float], default (0.0, 0.0)
                Range for sampling mean value, in [-1, 1]
            std_range: tuple[float, float], default (0.1, 0.1)
                Range for sampling standard deviation, in [0, 1]

        laplace:
            mean_range: tuple[float, float], default (0.0, 0.0)
                Range for sampling location parameter, in [-1, 1]
            scale_range: tuple[float, float], default (0.1, 0.1)
                Range for sampling scale parameter, in [0, 1]

        beta:
            alpha_range: tuple[float, float], default (0.5, 1.5)
                Value < 1 = U-shaped, Value > 1 = Bell-shaped
                Range for sampling first shape parameter, in (0, inf)
            beta_range: tuple[float, float], default (0.5, 1.5)
                Value < 1 = U-shaped, Value > 1 = Bell-shaped
                Range for sampling second shape parameter, in (0, inf)
            scale_range: tuple[float, float], default (0.1, 0.3)
                Smaller scale for subtler noise
                Range for sampling output scale, in [0, 1]

Examples:
    >>> # Constant RGB shift with different ranges per channel:
    >>> transform = A.AdditiveNoise(
    ...     noise_type="uniform",
    ...     spatial_mode="constant",
    ...     noise_params={"ranges": [(-0.2, 0.2), (-0.1, 0.1), (-0.1, 0.1)]}
    ... )

    >>> # Gaussian noise shared across channels:
    >>> transform = AdditiveNoise(
    ...     noise_type="gaussian",
    ...     spatial_mode="shared",
    ...     noise_params={"mean_range": (0.0, 0.0), "std_range": (0.05, 0.15)}
    ... )

Note:
    Performance considerations:
        - "constant" mode is fastest as it generates only C values (C = number of channels)
        - "shared" mode generates HxW values and reuses them for all channels
        - "per_pixel" mode generates HxWxC values, slowest but most flexible

    Distribution characteristics:
        - uniform: Equal probability within range, good for simple perturbations
        - gaussian: Bell-shaped, symmetric, good for natural noise
        - laplace: Like gaussian but with heavier tails, good for outliers
        - beta: Very flexible shape, can be uniform, bell-shaped, or U-shaped

    Implementation details:
        - All noise is generated in normalized range and scaled by image max value
        - For uint8 images, final noise range is [-255, 255]
        - For float images, final noise range is [-1, 1]
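
    A shape-level NumPy illustration of the three spatial modes via broadcasting
    (only the array shapes matter here; the real sampling lives in the library's
    functional helpers):

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> h, w, c = 64, 64, 3
    >>> constant = rng.normal(0, 0.1, size=(1, 1, c))   # one value per channel
    >>> shared = rng.normal(0, 0.1, size=(h, w, 1))     # one map for all channels
    >>> per_pixel = rng.normal(0, 0.1, size=(h, w, c))  # independent everywhere
    >>> img = np.zeros((h, w, c), dtype=np.float32)
    >>> noisy = np.clip(img + shared, 0, 1)             # broadcasting applies the map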

    class InitSchema(BaseTransformInitSchema):
        noise_type: Literal["uniform", "gaussian", "laplace", "beta"]
        spatial_mode: Literal["constant", "per_pixel", "shared"]
        noise_params: dict[str, Any] | None
        approximation: float = Field(ge=0.0, le=1.0)

        @model_validator(mode="after")
        def _validate_noise_params(self) -> Self:
            # Default parameters for each noise type
            default_params = {
                "uniform": {"ranges": [(-0.1, 0.1)]},
                "gaussian": {"mean_range": (0.0, 0.0), "std_range": (0.1, 0.1)},
                "laplace": {"mean_range": (0.0, 0.0), "scale_range": (0.1, 0.1)},
                "beta": {"alpha_range": (0.5, 1.5), "beta_range": (0.5, 1.5), "scale_range": (0.1, 0.3)},
            }

            # Use default params if none provided
            params_dict = self.noise_params if self.noise_params is not None else default_params[self.noise_type]

            # Add noise_type and validate with the matching params model
            params_dict = {**params_dict, "noise_type": self.noise_type}
            params_class = {
                "uniform": UniformParams,
                "gaussian": GaussianParams,
                "laplace": LaplaceParams,
                "beta": BetaParams,
            }[self.noise_type]
            validated_params = params_class(**params_dict)

            # Store the validated parameters as a plain dict
            self.noise_params = validated_params.model_dump()
            return self

    def __init__(
        self,
        noise_type: Literal["uniform", "gaussian", "laplace", "beta"] = "uniform",
        spatial_mode: Literal["constant", "per_pixel", "shared"] = "constant",
        noise_params: dict[str, Any] | None = None,
        approximation: float = 1.0,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.noise_type = noise_type
        self.spatial_mode = spatial_mode
        self.noise_params = noise_params
        self.approximation = approximation

    def apply(self, img: np.ndarray, noise_map: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the AdditiveNoise transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the AdditiveNoise transform to.
            noise_map (np.ndarray): The noise map to apply to the image.
            **params (Any): Additional parameters for the transform.

        """
        return fpixel.add_noise(img, noise_map)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the AdditiveNoise transform.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        """
        metadata = data["image"] if "image" in data else data["images"][0]
        max_value = MAX_VALUES_BY_DTYPE[metadata.dtype]
        noise_map = fpixel.generate_noise(
            noise_type=self.noise_type,
            spatial_mode=self.spatial_mode,
            shape=params["shape"],
            params=self.noise_params,
            max_value=max_value,
            approximation=self.approximation,
            random_generator=self.random_generator,
        )
        return {"noise_map": noise_map}


class RGBShift(AdditiveNoise):
    """Randomly shift values for each channel of the input RGB image.

A specialized version of AdditiveNoise that applies constant uniform shifts to RGB channels.
Each channel (R,G,B) can have its own shift range specified.

Args:
    r_shift_limit ((int, int) or int): Range for shifting the red channel. Options:
        - If tuple (min, max): Sample shift value from this range
        - If int: Sample shift value from (-r_shift_limit, r_shift_limit)
        - For uint8 images: Values represent absolute shifts in [0, 255]
        - For float images: Values represent relative shifts in [0, 1]
        Default: (-20, 20)

    g_shift_limit ((int, int) or int): Range for shifting the green channel. Options:
        - If tuple (min, max): Sample shift value from this range
        - If int: Sample shift value from (-g_shift_limit, g_shift_limit)
        - For uint8 images: Values represent absolute shifts in [0, 255]
        - For float images: Values represent relative shifts in [0, 1]
        Default: (-20, 20)

    b_shift_limit ((int, int) or int): Range for shifting the blue channel. Options:
        - If tuple (min, max): Sample shift value from this range
        - If int: Sample shift value from (-b_shift_limit, b_shift_limit)
        - For uint8 images: Values represent absolute shifts in [0, 255]
        - For float images: Values represent relative shifts in [0, 1]
        Default: (-20, 20)

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - Values are shifted independently for each channel
    - For uint8 images:
        * Input ranges like (-20, 20) represent pixel value shifts
        * A shift of 20 means adding 20 to that channel
        * Final values are clipped to [0, 255]
    - For float32 images:
        * Input ranges like (-0.1, 0.1) represent relative shifts
        * A shift of 0.1 means adding 0.1 to that channel
        * Final values are clipped to [0, 1]

Examples:
    >>> import numpy as np
    >>> import albumentations as A

    # Shift RGB channels of uint8 image
    >>> transform = A.RGBShift(
    ...     r_shift_limit=30,  # Will sample red shift from [-30, 30]
    ...     g_shift_limit=(-20, 20),  # Will sample green shift from [-20, 20]
    ...     b_shift_limit=(-10, 10),  # Will sample blue shift from [-10, 10]
    ...     p=1.0
    ... )
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> shifted = transform(image=image)["image"]

    # Same effect using AdditiveNoise
    >>> transform = A.AdditiveNoise(
    ...     noise_type="uniform",
    ...     spatial_mode="constant",  # One value per channel
    ...     noise_params={
    ...         "ranges": [(-30/255, 30/255), (-20/255, 20/255), (-10/255, 10/255)]
    ...     },
    ...     p=1.0
    ... )
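
    # Float32 images use relative shifts in [0, 1] (values here are illustrative):
    >>> float_image = np.random.rand(100, 100, 3).astype(np.float32)
    >>> transform = A.RGBShift(
    ...     r_shift_limit=0.1, g_shift_limit=0.1, b_shift_limit=0.1, p=1.0
    ... )
    >>> shifted_float = transform(image=float_image)["image"]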

See Also:
    - AdditiveNoise: More general noise transform with various options:
        * Different noise distributions (uniform, gaussian, laplace, beta)
        * Spatial modes (constant, per-pixel, shared)
        * Approximation for faster computation
    - RandomToneCurve: For non-linear color transformations
    - RandomBrightnessContrast: For combined brightness and contrast adjustments
    - PlanckianJitter: For color temperature adjustments
    - HueSaturationValue: For HSV color space adjustments
    - ColorJitter: For combined brightness, contrast, saturation adjustments

    """

    class InitSchema(BaseTransformInitSchema):
        r_shift_limit: SymmetricRangeType
        g_shift_limit: SymmetricRangeType
        b_shift_limit: SymmetricRangeType

    def __init__(
        self,
        r_shift_limit: tuple[float, float] | float = (-20, 20),
        g_shift_limit: tuple[float, float] | float = (-20, 20),
        b_shift_limit: tuple[float, float] | float = (-20, 20),
        p: float = 0.5,
    ):
        # Limits given in uint8 units are normalized to [-1, 1] so the
        # underlying AdditiveNoise machinery can work in relative units.
        def normalize_range(limit: tuple[float, float]) -> tuple[float, float]:
            if abs(limit[0]) > 1 or abs(limit[1]) > 1:
                return limit[0] / 255, limit[1] / 255
            return limit

        ranges = [
            normalize_range(to_tuple(r_shift_limit)),
            normalize_range(to_tuple(g_shift_limit)),
            normalize_range(to_tuple(b_shift_limit)),
        ]

        super().__init__(
            noise_type="uniform",
            spatial_mode="constant",
            noise_params={"ranges": ranges},
            p=p,
        )

        self.r_shift_limit = to_tuple(r_shift_limit)
        self.g_shift_limit = to_tuple(g_shift_limit)
        self.b_shift_limit = to_tuple(b_shift_limit)


class SaltAndPepper(ImageOnlyTransform):
    """Apply salt and pepper noise to the input image.

Salt and pepper noise is a form of impulse noise that randomly sets pixels to either maximum value (salt)
or minimum value (pepper). The amount and proportion of salt vs pepper noise can be controlled.
The same noise mask is applied to all channels of the image to preserve color consistency.

Args:
    amount ((float, float)): Range for total amount of noise (both salt and pepper).
        Values between 0 and 1. For example:
        - 0.05 means 5% of all pixels will be replaced with noise
        - (0.01, 0.06) will sample amount uniformly from 1% to 6%
        Default: (0.01, 0.06)

    salt_vs_pepper ((float, float)): Range for ratio of salt (white) vs pepper (black) noise.
        Values between 0 and 1. For example:
        - 0.5 means equal amounts of salt and pepper
        - 0.7 means 70% of noisy pixels will be salt, 30% pepper
        - (0.4, 0.6) will sample ratio uniformly from 40% to 60%
        Default: (0.4, 0.6)

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - Salt noise sets pixels to maximum value (255 for uint8, 1.0 for float32)
    - Pepper noise sets pixels to 0
    - The noise mask is generated once and applied to all channels to maintain
      color consistency (i.e., if a pixel is set to salt, all its color channels
      will be set to maximum value)
    - The exact number of affected pixels matches the specified amount as masks
      are generated without overlap

Mathematical Formulation:
    For an input image I, the output O is:
    O[c,x,y] = max_value,  if salt_mask[x,y] = True
    O[c,x,y] = 0,         if pepper_mask[x,y] = True
    O[c,x,y] = I[c,x,y],  otherwise

    where:
    - c is the channel index
    - salt_mask and pepper_mask are 2D boolean arrays applied to all channels
    - Number of True values in salt_mask = floor(H*W * amount * salt_ratio)
    - Number of True values in pepper_mask = floor(H*W * amount * (1 - salt_ratio))
    - amount ∈ [amount_min, amount_max]
    - salt_ratio ∈ [salt_vs_pepper_min, salt_vs_pepper_max]
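
    A quick sketch of the pixel bookkeeping above (illustrative only):

    >>> H, W, amount, salt_ratio = 100, 100, 0.05, 0.6
    >>> num_noisy = int(H * W * amount)            # 500 pixels receive noise
    >>> num_salt = int(num_noisy * salt_ratio)     # 300 of them become salt (max value)
    >>> num_pepper = num_noisy - num_salt          # the remaining 200 become pepper (0)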

Examples:
    >>> import albumentations as A
    >>> import numpy as np

    # Apply salt and pepper noise with default parameters
    >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> transform = A.SaltAndPepper(p=1.0)
    >>> noisy_image = transform(image=image)["image"]

    # Heavy noise with more salt than pepper
    >>> transform = A.SaltAndPepper(
    ...     amount=(0.1, 0.2),       # 10-20% of pixels will be noisy
    ...     salt_vs_pepper=(0.7, 0.9),  # 70-90% of noise will be salt
    ...     p=1.0
    ... )
    >>> noisy_image = transform(image=image)["image"]

References:
    - Digital Image Processing: Rafael C. Gonzalez and Richard E. Woods, 4th Edition,
        Chapter 5: Image Restoration and Reconstruction.
    - Fundamentals of Digital Image Processing: A. K. Jain, Chapter 7: Image Degradation and Restoration.
    - Salt and pepper noise: https://en.wikipedia.org/wiki/Salt-and-pepper_noise

See Also:
    - GaussNoise: For additive Gaussian noise
    - MultiplicativeNoise: For multiplicative noise
    - ISONoise: For camera sensor noise simulation

    """

    class InitSchema(BaseTransformInitSchema):
        amount: ZeroOneRangeType
        salt_vs_pepper: ZeroOneRangeType

    def __init__(
        self,
        amount: tuple[float, float] = (0.01, 0.06),
        salt_vs_pepper: tuple[float, float] = (0.4, 0.6),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.amount = amount
        self.salt_vs_pepper = salt_vs_pepper

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the SaltAndPepper transform.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        """
        image = data["image"] if "image" in data else data["images"][0]
        height, width = image.shape[:2]

        total_amount = self.py_random.uniform(*self.amount)
        salt_ratio = self.py_random.uniform(*self.salt_vs_pepper)

        area = height * width
        num_pixels = int(area * total_amount)
        num_salt = int(num_pixels * salt_ratio)

        # Draw all noisy positions at once without replacement so the exact
        # number of affected pixels matches the sampled amount.
        noise_positions = self.random_generator.choice(area, size=num_pixels, replace=False)

        salt_mask = np.zeros(area, dtype=bool)
        pepper_mask = np.zeros(area, dtype=bool)

        salt_mask[noise_positions[:num_salt]] = True
        pepper_mask[noise_positions[num_salt:]] = True

        salt_mask = salt_mask.reshape(height, width)
        pepper_mask = pepper_mask.reshape(height, width)

        return {
            "salt_mask": salt_mask,
            "pepper_mask": pepper_mask,
        }

    def apply(
        self,
        img: np.ndarray,
        salt_mask: np.ndarray,
        pepper_mask: np.ndarray,
        **params: Any,
    ) -> np.ndarray:
        """Apply the SaltAndPepper transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the SaltAndPepper transform to.
            salt_mask (np.ndarray): The salt mask to apply to the image.
            pepper_mask (np.ndarray): The pepper mask to apply to the image.
            **params (Any): Additional parameters for the transform.

        """
        return fpixel.apply_salt_and_pepper(img, salt_mask, pepper_mask)


class PlasmaBrightnessContrast(ImageOnlyTransform):
    """Apply plasma fractal pattern to modify image brightness and contrast.

Uses Diamond-Square algorithm to generate organic-looking fractal patterns
that create spatially-varying brightness and contrast adjustments.

Args:
    brightness_range ((float, float)): Range for brightness adjustment strength.
        Values between -1 and 1:
        - Positive values increase brightness
        - Negative values decrease brightness
        - 0 means no brightness change
        Default: (-0.3, 0.3)

    contrast_range ((float, float)): Range for contrast adjustment strength.
        Values between -1 and 1:
        - Positive values increase contrast
        - Negative values decrease contrast
        - 0 means no contrast change
        Default: (-0.3, 0.3)

    plasma_size (int): Size of the initial plasma pattern grid.
        Larger values create more detailed patterns but are slower to compute.
        The pattern will be resized to match the input image dimensions.
        Default: 256

    roughness (float): Controls how quickly the noise amplitude increases at each iteration.
        Must be greater than 0:
        - Low values (< 1.0): Smoother, more gradual pattern
        - Medium values (~2.0): Natural-looking pattern
        - High values (> 3.0): Very rough, noisy pattern
        Default: 3.0

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - Works with any number of channels (grayscale, RGB, multispectral)
    - The same plasma pattern is applied to all channels
    - Operations are performed in float32 precision
    - Final values are clipped to valid range [0, max_value]

Mathematical Formulation:
    1. Plasma Pattern Generation (Diamond-Square Algorithm):
       Starting with a 3x3 grid of random values in [-1, 1], iteratively:
       a) Diamond Step: For each 2x2 cell, compute center using diamond kernel:
          [[0.25, 0.0, 0.25],
           [0.0,  0.0, 0.0 ],
           [0.25, 0.0, 0.25]]

       b) Square Step: Fill remaining points using square kernel:
          [[0.0,  0.25, 0.0 ],
           [0.25, 0.0,  0.25],
           [0.0,  0.25, 0.0 ]]

       c) Add random noise scaled by roughness^iteration

       d) Normalize final pattern P to [0,1] range using min-max normalization

    2. Brightness Adjustment:
       For each pixel (x,y):
       O(x,y) = I(x,y) + b·P(x,y)
       where:
       - I is the input image
       - b is the brightness factor
       - P is the normalized plasma pattern

    3. Contrast Adjustment:
       For each pixel (x,y):
       O(x,y) = I(x,y)·(1 + c·P(x,y)) + μ·(1 - (1 + c·P(x,y)))
       where:
       - I is the input image
       - c is the contrast factor
       - P is the normalized plasma pattern
       - μ is the mean pixel value

Examples:
    >>> import albumentations as A
    >>> import numpy as np

    # Default parameters
    >>> transform = A.PlasmaBrightnessContrast(p=1.0)

    # Custom adjustments
    >>> transform = A.PlasmaBrightnessContrast(
    ...     brightness_range=(-0.5, 0.5),
    ...     contrast_range=(-0.3, 0.3),
    ...     plasma_size=512,    # More detailed pattern
    ...     roughness=0.7,      # Smoother transitions
    ...     p=1.0
    ... )

References:
    - Fournier, Fussell, and Carpenter, "Computer rendering of stochastic models," Communications of
        the ACM, 1982. Paper introducing the Diamond-Square algorithm.
    - Diamond-Square algorithm: https://en.wikipedia.org/wiki/Diamond-square_algorithm

See Also:
    - RandomBrightnessContrast: For uniform brightness/contrast adjustments
    - CLAHE: For contrast limited adaptive histogram equalization
    - FancyPCA: For color-based contrast enhancement
    - HistogramMatching: For reference-based contrast adjustment

    """

    class InitSchema(BaseTransformInitSchema):
        brightness_range: Annotated[tuple[float, float], AfterValidator(check_range_bounds(-1, 1))]
        contrast_range: Annotated[tuple[float, float], AfterValidator(check_range_bounds(-1, 1))]
        plasma_size: int = Field(ge=1)
        roughness: float = Field(gt=0)

    def __init__(
        self,
        brightness_range: tuple[float, float] = (-0.3, 0.3),
        contrast_range: tuple[float, float] = (-0.3, 0.3),
        plasma_size: int = 256,
        roughness: float = 3.0,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.brightness_range = brightness_range
        self.contrast_range = contrast_range
        self.plasma_size = plasma_size
        self.roughness = roughness

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the PlasmaBrightnessContrast transform.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        """
        shape = params["shape"]

        # Sample adjustment strengths
        brightness_factor = self.py_random.uniform(*self.brightness_range)
        contrast_factor = self.py_random.uniform(*self.contrast_range)

        # Generate the plasma pattern once; it is shared by all channels
        plasma = fpixel.generate_plasma_pattern(
            target_shape=shape[:2],
            roughness=self.roughness,
            random_generator=self.random_generator,
        )

        return {
            "brightness_factor": brightness_factor,
            "contrast_factor": contrast_factor,
            "plasma_pattern": plasma,
        }

    def apply(
        self,
        img: np.ndarray,
        brightness_factor: float,
        contrast_factor: float,
        plasma_pattern: np.ndarray,
        **params: Any,
    ) -> np.ndarray:
        """Apply the PlasmaBrightnessContrast transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the PlasmaBrightnessContrast transform to.
            brightness_factor (float): The brightness factor to apply to the image.
            contrast_factor (float): The contrast factor to apply to the image.
            plasma_pattern (np.ndarray): The plasma pattern to apply to the image.
            **params (Any): Additional parameters for the transform.

        """
        return fpixel.apply_plasma_brightness_contrast(
            img,
            brightness_factor,
            contrast_factor,
            plasma_pattern,
        )

    @batch_transform("spatial", keep_depth_dim=False, has_batch_dim=True, has_depth_dim=False)
    def apply_to_images(self, images: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaBrightnessContrast transform to a batch of images.

        Args:
            images (np.ndarray): The input images to apply the PlasmaBrightnessContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(images, **params)

    @batch_transform("spatial", keep_depth_dim=False, has_batch_dim=False, has_depth_dim=True)
    def apply_to_volume(self, volume: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaBrightnessContrast transform to a volume.

        Args:
            volume (np.ndarray): The input volume to apply the PlasmaBrightnessContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volume, **params)

    @batch_transform("spatial", keep_depth_dim=False, has_batch_dim=True, has_depth_dim=True)
    def apply_to_volumes(self, volumes: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaBrightnessContrast transform to a batch of volumes.

        Args:
            volumes (np.ndarray): The input volumes to apply the PlasmaBrightnessContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volumes, **params)


class PlasmaShadow(ImageOnlyTransform):
    """Apply plasma-based shadow effect to the image using Diamond-Square algorithm.

Creates organic-looking shadows using plasma fractal noise pattern.
The shadow intensity varies smoothly across the image, creating natural-looking
darkening effects that can simulate shadows, shading, or lighting variations.

Args:
    shadow_intensity_range (tuple[float, float]): Range for shadow intensity.
        Values between 0 and 1:
        - 0 means no shadow (original image)
        - 1 means maximum darkening (black)
        - Values between create partial shadows
        Default: (0.3, 0.7)

    roughness (float): Controls how quickly the noise amplitude increases at each iteration.
        Must be greater than 0:
        - Low values (< 1.0): Smoother, more gradual shadows
        - Medium values (~2.0): Natural-looking shadows
        - High values (> 3.0): Very rough, noisy shadows
        Default: 3.0

    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - The transform darkens the image using a plasma pattern
    - Works with any number of channels (grayscale, RGB, multispectral)
    - Shadow pattern is generated using Diamond-Square algorithm with specific kernels
    - The same shadow pattern is applied to all channels
    - Final values are clipped to valid range [0, max_value]

Mathematical Formulation:
    1. Plasma Pattern Generation (Diamond-Square Algorithm):
       Starting with a 3x3 grid of random values in [-1, 1], iteratively:
       a) Diamond Step: For each 2x2 cell, compute center using diamond kernel:
          [[0.25, 0.0, 0.25],
           [0.0,  0.0, 0.0 ],
           [0.25, 0.0, 0.25]]

       b) Square Step: Fill remaining points using square kernel:
          [[0.0,  0.25, 0.0 ],
           [0.25, 0.0,  0.25],
           [0.0,  0.25, 0.0 ]]

       c) Add random noise scaled by roughness^iteration

       d) Normalize final pattern P to [0,1] range using min-max normalization

    2. Shadow Application:
       For each pixel (x,y):
       O(x,y) = I(x,y) * (1 - i*P(x,y))
       where:
       - I is the input image
       - P is the normalized plasma pattern
       - i is the sampled shadow intensity
       - O is the output image
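
    Equivalently, in NumPy (a sketch assuming a precomputed pattern P and a
    sampled intensity i; not the internal implementation):

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> I = rng.random((8, 8, 3), dtype=np.float32)   # float image in [0, 1]
    >>> P = rng.random((8, 8, 1), dtype=np.float32)   # plasma pattern, broadcast over channels
    >>> O = I * (1 - 0.5 * P)                          # darken each pixel by up to 50%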

Examples:
    >>> import albumentations as A
    >>> import numpy as np

    # Default parameters for natural shadows
    >>> transform = A.PlasmaShadow(p=1.0)

    # Subtle, smooth shadows
    >>> transform = A.PlasmaShadow(
    ...     shadow_intensity_range=(0.1, 0.3),
    ...     roughness=0.7,
    ...     p=1.0
    ... )

    # Dramatic, detailed shadows
    >>> transform = A.PlasmaShadow(
    ...     shadow_intensity_range=(0.5, 0.9),
    ...     roughness=0.3,
    ...     p=1.0
    ... )

References:
    - Fournier, Fussell, and Carpenter, "Computer rendering of stochastic models," Communications of
        the ACM, 1982. Paper introducing the Diamond-Square algorithm.
    - Diamond-Square algorithm: https://en.wikipedia.org/wiki/Diamond-square_algorithm

See Also:
    - PlasmaBrightnessContrast: For brightness/contrast adjustments using plasma patterns
    - RandomShadow: For geometric shadow effects
    - RandomToneCurve: For global lighting adjustments

    """

    class InitSchema(BaseTransformInitSchema):
        shadow_intensity_range: ZeroOneRangeType
        roughness: float = Field(gt=0)

    def __init__(
        self,
        shadow_intensity_range: tuple[float, float] = (0.3, 0.7),
        roughness: float = 3.0,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.shadow_intensity_range = shadow_intensity_range
        self.roughness = roughness

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the PlasmaShadow transform.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        """
        shape = params["shape"]

        intensity = self.py_random.uniform(*self.shadow_intensity_range)

        # Generate the plasma pattern once; it is shared by all channels
        plasma = fpixel.generate_plasma_pattern(
            target_shape=shape[:2],
            roughness=self.roughness,
            random_generator=self.random_generator,
        )

        return {
            "intensity": intensity,
            "plasma_pattern": plasma,
        }

    def apply(self, img: np.ndarray, intensity: float, plasma_pattern: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaShadow transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the PlasmaShadow transform to.
            intensity (float): The intensity of the shadow to apply to the image.
            plasma_pattern (np.ndarray): The plasma pattern to apply to the image.
            **params (Any): Additional parameters for the transform.

        """
        return fpixel.apply_plasma_shadow(img, intensity, plasma_pattern)

    @batch_transform("spatial", keep_depth_dim=False, has_batch_dim=True, has_depth_dim=False)
    def apply_to_images(self, images: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaShadow transform to a batch of images.

        Args:
            images (np.ndarray): The input images to apply the PlasmaShadow transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(images, **params)

    @batch_transform("spatial", keep_depth_dim=False, has_batch_dim=False, has_depth_dim=True)
    def apply_to_volume(self, volume: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaShadow transform to a volume.

        Args:
            volume (np.ndarray): The input volume to apply the PlasmaShadow transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volume, **params)

    @batch_transform("spatial", keep_depth_dim=False, has_batch_dim=True, has_depth_dim=True)
    def apply_to_volumes(self, volumes: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the PlasmaShadow transform to a batch of volumes.

        Args:
            volumes (np.ndarray): The input volumes to apply the PlasmaShadow transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volumes, **params)


class Illumination(ImageOnlyTransform):
    """Apply various illumination effects to the image.

This transform simulates different lighting conditions by applying controlled
illumination patterns. It can create effects like:
- Directional lighting (linear mode)
- Corner shadows/highlights (corner mode)
- Spotlights or local lighting (gaussian mode)

These effects can be used to:
- Simulate natural lighting variations
- Add dramatic lighting effects
- Create synthetic shadows or highlights
- Augment training data with different lighting conditions

Args:
    mode (Literal["linear", "corner", "gaussian"]): Type of illumination pattern:
        - 'linear': Creates a smooth gradient across the image,
                   simulating directional lighting like sunlight
                   through a window
        - 'corner': Applies gradient from any corner,
                   simulating light source from a corner
        - 'gaussian': Creates a circular spotlight effect,
                     simulating local light sources
        Default: 'linear'

    intensity_range (tuple[float, float]): Range for effect strength.
        Values between 0.01 and 0.2:
        - 0.01-0.05: Subtle lighting changes
        - 0.05-0.1: Moderate lighting effects
        - 0.1-0.2: Strong lighting effects
        Default: (0.01, 0.2)

    effect_type (str): Type of lighting change:
        - 'brighten': Only adds light (like a spotlight)
        - 'darken': Only removes light (like a shadow)
        - 'both': Randomly chooses between brightening and darkening
        Default: 'both'

    angle_range (tuple[float, float]): Range for gradient angle in degrees.
        Controls direction of linear gradient:
        - 0°: Left to right
        - 90°: Top to bottom
        - 180°: Right to left
        - 270°: Bottom to top
        Only used for 'linear' mode.
        Default: (0, 360)

    center_range (tuple[float, float]): Range for spotlight position.
        Values between 0 and 1 representing relative position:
        - (0, 0): Top-left corner
        - (1, 1): Bottom-right corner
        - (0.5, 0.5): Center of image
        Only used for 'gaussian' mode.
        Default: (0.1, 0.9)

    sigma_range (tuple[float, float]): Range for spotlight size.
        Values between 0.2 and 1.0:
        - 0.2: Small, focused spotlight
        - 0.5: Medium-sized light area
        - 1.0: Broad, soft lighting
        Only used for 'gaussian' mode.
        Default: (0.2, 1.0)

    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image

Image types:
    uint8, float32

Examples:
    >>> import albumentations as A
    >>> # Simulate sunlight through window
    >>> transform = A.Illumination(
    ...     mode='linear',
    ...     intensity_range=(0.05, 0.1),
    ...     effect_type='brighten',
    ...     angle_range=(30, 60)
    ... )
    >>>
    >>> # Create dramatic corner shadow
    >>> transform = A.Illumination(
    ...     mode='corner',
    ...     intensity_range=(0.1, 0.2),
    ...     effect_type='darken'
    ... )
    >>>
    >>> # Add multiple spotlights
    >>> transform1 = A.Illumination(
    ...     mode='gaussian',
    ...     intensity_range=(0.05, 0.15),
    ...     effect_type='brighten',
    ...     center_range=(0.2, 0.4),
    ...     sigma_range=(0.2, 0.3)
    ... )
    >>> transform2 = A.Illumination(
    ...     mode='gaussian',
    ...     intensity_range=(0.05, 0.15),
    ...     effect_type='darken',
    ...     center_range=(0.6, 0.8),
    ...     sigma_range=(0.3, 0.5)
    ... )
    >>> transforms = A.Compose([transform1, transform2])
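    >>>
    >>> # A hand-rolled sketch of what 'linear' mode does conceptually (this is
    >>> # not the library's implementation): build a directional gradient mask
    >>> import numpy as np
    >>> h, w = 100, 100
    >>> angle = np.deg2rad(45)
    >>> ys, xs = np.mgrid[0:h, 0:w]
    >>> gradient = np.cos(angle) * xs / w + np.sin(angle) * ys / h  # grows along the light direction
    >>> mask = (gradient - gradient.min()) / (gradient.max() - gradient.min())  # normalized to [0, 1]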

References:
    - Lighting in Computer Vision:
      https://en.wikipedia.org/wiki/Lighting_in_computer_vision

    - Image-based lighting:
      https://en.wikipedia.org/wiki/Image-based_lighting

    - Similar implementation in Kornia:
      https://kornia.readthedocs.io/en/latest/augmentation.html#randomlinearillumination

    - Research on lighting augmentation:
      "Learning Deep Representations of Fine-grained Visual Descriptions"
      https://arxiv.org/abs/1605.05395

    - Photography lighting patterns:
      https://en.wikipedia.org/wiki/Lighting_pattern

Note:
    - The transform preserves image range and dtype
    - Effects are applied multiplicatively to preserve texture
    - Can be combined with other transforms for complex lighting scenarios
    - Useful for training models to be robust to lighting variations

    """

    class InitSchema(BaseTransformInitSchema):
        mode: Literal["linear", "corner", "gaussian"]
        intensity_range: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0.01, 0.2))]
        effect_type: Literal["brighten", "darken", "both"]
        angle_range: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0, 360))]
        center_range: ZeroOneRangeType
        sigma_range: Annotated[tuple[float, float], AfterValidator(check_range_bounds(0.2, 1.0))]

    def __init__(
        self,
        mode: Literal["linear", "corner", "gaussian"] = "linear",
        intensity_range: tuple[float, float] = (0.01, 0.2),
        effect_type: Literal["brighten", "darken", "both"] = "both",
        angle_range: tuple[float, float] = (0, 360),
        center_range: tuple[float, float] = (0.1, 0.9),
        sigma_range: tuple[float, float] = (0.2, 1.0),
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.mode = mode
        self.intensity_range = intensity_range
        self.effect_type = effect_type
        self.angle_range = angle_range
        self.center_range = center_range
        self.sigma_range = sigma_range

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the Illumination transform.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        """
        intensity = self.py_random.uniform(*self.intensity_range)

        # Resolve the sign of the effect: positive brightens, negative darkens
        sign = 1
        if self.effect_type == "both":
            sign = 1 if self.py_random.random() > 0.5 else -1
        elif self.effect_type == "darken":
            sign = -1
        intensity *= sign

        if self.mode == "linear":
            angle = self.py_random.uniform(*self.angle_range)
            return {
                "intensity": intensity,
                "angle": angle,
            }
        if self.mode == "corner":
            corner = self.py_random.randint(0, 3)  # Choose a random corner
            return {
                "intensity": intensity,
                "corner": corner,
            }

        x = self.py_random.uniform(*self.center_range)
        y = self.py_random.uniform(*self.center_range)
        sigma = self.py_random.uniform(*self.sigma_range)
        return {
            "intensity": intensity,
            "center": (x, y),
            "sigma": sigma,
        }

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the Illumination transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the Illumination transform to.
            **params (Any): Additional parameters for the transform.

        """
        if self.mode == "linear":
            return fpixel.apply_linear_illumination(
                img,
                intensity=params["intensity"],
                angle=params["angle"],
            )
        if self.mode == "corner":
            return fpixel.apply_corner_illumination(
                img,
                intensity=params["intensity"],
                corner=params["corner"],
            )
        return fpixel.apply_gaussian_illumination(
            img,
            intensity=params["intensity"],
            center=params["center"],
            sigma=params["sigma"],
        )


class AutoContrast(ImageOnlyTransform):
    """Automatically adjust image contrast by stretching the intensity range.

This transform provides two methods for contrast enhancement:
1. CDF method (default): Uses cumulative distribution function for more gradual adjustment
2. PIL method: Uses linear scaling like PIL.ImageOps.autocontrast

The transform can optionally exclude extreme values from both ends of the
intensity range and preserve specific intensity values (e.g., alpha channel).

Args:
    cutoff (float): Percentage of pixels to exclude from both ends of the histogram.
        Range: [0, 100]. Default: 0 (use full intensity range)
        - 0 means use the minimum and maximum intensity values found
        - 20 means exclude darkest and brightest 20% of pixels
    ignore (int, optional): Intensity value to preserve (e.g., alpha channel).
        Range: [0, 255]. Default: None
        - If specified, this intensity value will not be modified
        - Useful for images with alpha channel or special marker values
    method (Literal["cdf", "pil"]): Algorithm to use for contrast enhancement.
        Default: "cdf"
        - "cdf": Uses cumulative distribution for smoother adjustment
        - "pil": Uses linear scaling like PIL.ImageOps.autocontrast
    p (float): Probability of applying the transform. Default: 0.5

Targets:
    image

Image types:
    uint8, float32

Note:
    - The transform processes each color channel independently
    - For grayscale images, only one channel is processed
    - The output maintains the same dtype as input
    - Empty or single-color channels remain unchanged

Examples:
    >>> import albumentations as A
    >>> # Basic usage
    >>> transform = A.AutoContrast(p=1.0)
    >>>
    >>> # Exclude extreme values
    >>> transform = A.AutoContrast(cutoff=20, p=1.0)
    >>>
    >>> # Preserve alpha channel
    >>> transform = A.AutoContrast(ignore=255, p=1.0)
    >>>
    >>> # Use PIL-like contrast enhancement
    >>> transform = A.AutoContrast(method="pil", p=1.0)
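    >>>
    >>> # The "pil" method's linear stretch, written out by hand for one channel
    >>> # (an illustrative sketch, not the transform's internal code):
    >>> import numpy as np
    >>> ch = np.array([10, 50, 200], dtype=np.float32)
    >>> lo, hi = ch.min(), ch.max()
    >>> stretched = np.clip((ch - lo) * (255.0 / (hi - lo)), 0, 255).astype(np.uint8)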

    """

    class InitSchema(BaseTransformInitSchema):
        cutoff: float = Field(ge=0, le=100)
        ignore: int | None = Field(ge=0, le=255)
        method: Literal["cdf", "pil"]

    def __init__(
        self,
        cutoff: float = 0,
        ignore: int | None = None,
        method: Literal["cdf", "pil"] = "cdf",
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.cutoff = cutoff
        self.ignore = ignore
        self.method = method

    def apply(self, img: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the AutoContrast transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the AutoContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return fpixel.auto_contrast(img, self.cutoff, self.ignore, self.method)

    @batch_transform("channel", has_batch_dim=True, has_depth_dim=False)
    def apply_to_images(self, images: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the AutoContrast transform to a batch of images.

        Args:
            images (np.ndarray): The input images to apply the AutoContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(images, **params)

    @batch_transform("channel", has_batch_dim=False, has_depth_dim=True)
    def apply_to_volume(self, volume: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the AutoContrast transform to a volume.

        Args:
            volume (np.ndarray): The input volume to apply the AutoContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volume, **params)

    @batch_transform("channel", has_batch_dim=True, has_depth_dim=True)
    def apply_to_volumes(self, volumes: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the AutoContrast transform to a batch of volumes.

        Args:
            volumes (np.ndarray): The input volumes to apply the AutoContrast transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volumes, **params)


class HEStain(ImageOnlyTransform):
    """Applies H&E (Hematoxylin and Eosin) stain augmentation to histopathology images.

This transform simulates different H&E staining conditions using either:
1. Predefined stain matrices (8 standard references)
2. Vahadane method for stain extraction
3. Macenko method for stain extraction
4. Custom stain matrices

Args:
    method(Literal["preset", "random_preset", "vahadane", "macenko"]): Method to use for stain augmentation:
        - "preset": Use predefined stain matrices
        - "random_preset": Randomly select a preset matrix each time
        - "vahadane": Extract using Vahadane method
        - "macenko": Extract using Macenko method
        Default: "preset"

    preset(str | None): Preset stain matrix to use when method="preset":
        - "ruifrok": Standard reference from Ruifrok & Johnston
        - "macenko": Reference from Macenko's method
        - "standard": Typical bright-field microscopy
        - "high_contrast": Enhanced contrast
        - "h_heavy": Hematoxylin dominant
        - "e_heavy": Eosin dominant
        - "dark": Darker staining
        - "light": Lighter staining
        Default: "standard"

    intensity_scale_range(tuple[float, float]): Range for multiplicative stain intensity variation.
        Values are multipliers between 0.5 and 1.5. For example:
        - (0.7, 1.3) means stain intensities will vary from 70% to 130%
        - (0.9, 1.1) gives subtle variations
        - (0.5, 1.5) gives dramatic variations
        Default: (0.7, 1.3)

    intensity_shift_range(tuple[float, float]): Range for additive stain intensity variation.
        Values between -0.3 and 0.3. For example:
        - (-0.2, 0.2) means intensities will be shifted by -20% to +20%
        - (-0.1, 0.1) gives subtle shifts
        - (-0.3, 0.3) gives dramatic shifts
        Default: (-0.2, 0.2)

    augment_background(bool): Whether to apply augmentation to background regions.
        Default: False

Targets:
    image

Number of channels:
    3

Image types:
    uint8, float32

References:
    - A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical staining by
        color deconvolution," Analytical and Quantitative Cytology and Histology, 2001.
    - M. Macenko et al., "A method for normalizing histology slides for quantitative
        analysis," 2009 IEEE International Symposium on Biomedical Imaging, 2009.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create a sample H&E stained histopathology image
    >>> # For real use cases, load an actual H&E stained image
    >>> image = np.zeros((300, 300, 3), dtype=np.uint8)
    >>> # Simulate tissue regions with different staining patterns
    >>> image[50:150, 50:150] = np.array([120, 140, 180], dtype=np.uint8)  # Hematoxylin-rich region
    >>> image[150:250, 150:250] = np.array([140, 160, 120], dtype=np.uint8)  # Eosin-rich region
    >>>
    >>> # Example 1: Using a specific preset stain matrix
    >>> transform = A.HEStain(
    ...     method="preset",
    ...     preset="standard",
    ...     intensity_scale_range=(0.8, 1.2),
    ...     intensity_shift_range=(-0.1, 0.1),
    ...     augment_background=False,
    ...     p=1.0
    ... )
    >>> result = transform(image=image)
    >>> transformed_image = result['image']
    >>>
    >>> # Example 2: Using random preset selection
    >>> transform = A.HEStain(
    ...     method="random_preset",
    ...     intensity_scale_range=(0.7, 1.3),
    ...     intensity_shift_range=(-0.15, 0.15),
    ...     p=1.0
    ... )
    >>> result = transform(image=image)
    >>> transformed_image = result['image']
    >>>
    >>> # Example 3: Using Vahadane method (requires H&E stained input)
    >>> transform = A.HEStain(
    ...     method="vahadane",
    ...     intensity_scale_range=(0.7, 1.3),
    ...     p=1.0
    ... )
    >>> result = transform(image=image)
    >>> transformed_image = result['image']
    >>>
    >>> # Example 4: Using Macenko method (requires H&E stained input)
    >>> transform = A.HEStain(
    ...     method="macenko",
    ...     intensity_scale_range=(0.7, 1.3),
    ...     intensity_shift_range=(-0.2, 0.2),
    ...     p=1.0
    ... )
    >>> result = transform(image=image)
    >>> transformed_image = result['image']
    >>>
    >>> # Example 5: Combining with other transforms in a pipeline
    >>> transform = A.Compose([
    ...     A.HEStain(method="preset", preset="high_contrast", p=1.0),
    ...     A.RandomBrightnessContrast(p=0.5),
    ... ])
    >>> result = transform(image=image)

    """

    class InitSchema(BaseTransformInitSchema):
        method: Literal["preset", "random_preset", "vahadane", "macenko"]
        preset: (
            Literal[
                "ruifrok",
                "macenko",
                "standard",
                "high_contrast",
                "h_heavy",
                "e_heavy",
                "dark",
                "light",
            ]
            | None
        )
        intensity_scale_range: Annotated[
            tuple[float, float],
            AfterValidator(nondecreasing),
            AfterValidator(check_range_bounds(0, None)),
        ]
        intensity_shift_range: Annotated[
            tuple[float, float],
            AfterValidator(nondecreasing),
            AfterValidator(check_range_bounds(-1, 1)),
        ]
        augment_background: bool

        @model_validator(mode="after")
        def _validate_matrix_selection(self) -> Self:
            if self.method == "preset" and self.preset is None:
                self.preset = "standard"
            elif self.method == "random_preset" and self.preset is not None:
                raise ValueError("preset should not be specified when method='random_preset'")
            return self

    def __init__(
        self,
        method: Literal["preset", "random_preset", "vahadane", "macenko"] = "preset",
        preset: Literal[
            "ruifrok",
            "macenko",
            "standard",
            "high_contrast",
            "h_heavy",
            "e_heavy",
            "dark",
            "light",
        ]
        | None = None,
        intensity_scale_range: tuple[float, float] = (0.7, 1.3),
        intensity_shift_range: tuple[float, float] = (-0.2, 0.2),
        augment_background: bool = False,
        p: float = 0.5,
    ):
        super().__init__(p=p)
        self.method = method
        self.preset = preset
        self.intensity_scale_range = intensity_scale_range
        self.intensity_shift_range = intensity_shift_range
        self.augment_background = augment_background
        self.stain_normalizer = None

        # Initialize the stain extractor up front when an extraction method is used
        if method in {"vahadane", "macenko"}:
            self.stain_extractor = fpixel.get_normalizer(
                cast("Literal['vahadane', 'macenko']", method),
            )

        self.preset_names = [
            "ruifrok",
            "macenko",
            "standard",
            "high_contrast",
            "h_heavy",
            "e_heavy",
            "dark",
            "light",
        ]

    def _get_stain_matrix(self, img: np.ndarray) -> np.ndarray:
        """Get stain matrix based on selected method."""
        if self.method == "preset" and self.preset is not None:
            return fpixel.STAIN_MATRICES[self.preset]
        if self.method == "random_preset":
            random_preset = self.py_random.choice(self.preset_names)
            return fpixel.STAIN_MATRICES[random_preset]
        # "vahadane" or "macenko": fit the extractor to this image
        self.stain_extractor.fit(img)
        return self.stain_extractor.stain_matrix_target

    def apply(
        self,
        img: np.ndarray,
        stain_matrix: np.ndarray,
        scale_factors: np.ndarray,
        shift_values: np.ndarray,
        **params: Any,
    ) -> np.ndarray:
        """Apply the HEStain transform to the input image.

        Args:
            img (np.ndarray): The input image to apply the HEStain transform to.
            stain_matrix (np.ndarray): The stain matrix to use for the transform.
            scale_factors (np.ndarray): The scale factors to use for the transform.
            shift_values (np.ndarray): The shift values to use for the transform.
            **params (Any): Additional parameters for the transform.

        """
        non_rgb_error(img)
        return fpixel.apply_he_stain_augmentation(
            img=img,
            stain_matrix=stain_matrix,
            scale_factors=scale_factors,
            shift_values=shift_values,
            augment_background=self.augment_background,
        )

    @batch_transform("channel", has_batch_dim=True, has_depth_dim=False)
    def apply_to_images(self, images: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the HEStain transform to a batch of images.

        Args:
            images (np.ndarray): The input images to apply the HEStain transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(images, **params)

    @batch_transform("channel", has_batch_dim=False, has_depth_dim=True)
    def apply_to_volume(self, volume: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the HEStain transform to a volume.

        Args:
            volume (np.ndarray): The input volume to apply the HEStain transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volume, **params)

    @batch_transform("channel", has_batch_dim=True, has_depth_dim=True)
    def apply_to_volumes(self, volumes: np.ndarray, **params: Any) -> np.ndarray:
        """Apply the HEStain transform to a batch of volumes.

        Args:
            volumes (np.ndarray): The input volumes to apply the HEStain transform to.
            **params (Any): Additional parameters for the transform.

        """
        return self.apply(volumes, **params)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the HEStain transform.

        Args:
            params (dict[str, Any]): The parameters of the transform.
            data (dict[str, Any]): The data to apply the transform to.

        """
        image = data["image"] if "image" in data else data["images"][0]
        stain_matrix = self._get_stain_matrix(image)

        # One multiplicative and one additive factor per stain (H and E)
        scale_factors = np.array(
            [
                self.py_random.uniform(*self.intensity_scale_range),
                self.py_random.uniform(*self.intensity_scale_range),
            ],
        )
        shift_values = np.array(
            [
                self.py_random.uniform(*self.intensity_shift_range),
                self.py_random.uniform(*self.intensity_shift_range),
            ],
        )

        return {
            "stain_matrix": stain_matrix,
            "scale_factors": scale_factors,
            "shift_values": shift_values,
        }