
"""Domain adaptation transforms for image augmentation.

This module provides transformations designed to bridge the domain gap between
datasets by adapting the style of an input image to match that of reference images
from a target domain. Adaptations are based on matching statistical properties
like histograms, frequency spectra, or overall pixel distributions.
"""

from __future__ import annotations

import warnings
from collections.abc import Sequence
from typing import Annotated, Any, Callable, Literal, cast

import cv2
import numpy as np
from pydantic import AfterValidator, field_validator, model_validator
from typing_extensions import Self

from albumentations.augmentations.mixing.domain_adaptation_functional import (
    adapt_pixel_distribution,
    apply_histogram,
    fourier_domain_adaptation,
)
from albumentations.augmentations.utils import read_rgb_image
from albumentations.core.pydantic import ZeroOneRangeType, check_range_bounds, nondecreasing
from albumentations.core.transforms_interface import BaseTransformInitSchema, ImageOnlyTransform

__all__ = ["FDA", "HistogramMatching", "PixelDistributionAdaptation"]

MAX_BETA_LIMIT = 0.5


class BaseDomainAdaptationInitSchema(BaseTransformInitSchema):
    reference_images: Sequence[Any] | None
    read_fn: Callable[[Any], np.ndarray] | None
    metadata_key: str

    @model_validator(mode="after")
    def _check_deprecated_args(self) -> Self:
        if self.reference_images is not None:
            warnings.warn(
                "'reference_images' and 'read_fn' arguments are deprecated. "
                "Please pass pre-loaded reference images using the "
                f"'{self.metadata_key}' key in the input data dictionary.",
                DeprecationWarning,
                stacklevel=2,
            )
            if self.read_fn is None:
                msg = "read_fn cannot be None when using the deprecated 'reference_images' argument."
                raise ValueError(msg)
        return self


class BaseDomainAdaptation(ImageOnlyTransform):
    """Base class for domain adaptation transforms.

Domain adaptation transforms modify source images to match the characteristics of a target domain.
These transforms typically require an additional reference image or dataset from the target domain
to extract style information or domain-specific features.

This base class provides the framework for implementing various domain adaptation techniques such as
color transfer, style transfer, frequency domain adaptation, or histogram matching.

Args:
    reference_images (Sequence[Any] | None): Deprecated. Sequence of references to images from the target
        domain. Should be used with read_fn to load actual images. Prefer passing pre-loaded images via
        metadata_key.
    read_fn (Callable[[Any], np.ndarray] | None): Deprecated. Function to read an image from a reference.
        Should be used with reference_images.
    metadata_key (str): Key in the input data dictionary that contains pre-loaded target domain images.
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Notes:
    - Subclasses should implement the `apply` method to perform the actual adaptation.
    - Use `targets_as_params` property to define what additional data your transform needs.
    - Override `get_params_dependent_on_data` to extract the target domain data.
    - Domain adaptation often requires per-sample auxiliary data, which should be passed
      through the main data dictionary rather than at initialization time.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Implement a simple color transfer domain adaptation transform
    >>> class SimpleColorTransfer(A.BaseDomainAdaptation):
    ...     class InitSchema(A.BaseTransformInitSchema):
    ...         intensity: float = Field(gt=0, le=1)
    ...         reference_key: str
    ...
    ...     def __init__(
    ...         self,
    ...         intensity: float = 0.5,
    ...         reference_key: str = "target_image",
    ...         p: float = 1.0
    ...     ):
    ...         super().__init__(p=p)
    ...         self.intensity = intensity
    ...         self.reference_key = reference_key
    ...
    ...     @property
    ...     def targets_as_params(self) -> list[str]:
    ...         return [self.reference_key]  # We need target domain image
    ...
    ...     def get_params_dependent_on_data(
    ...         self,
    ...         params: dict[str, Any],
    ...         data: dict[str, Any]
    ...     ) -> dict[str, Any]:
    ...         target_image = data.get(self.reference_key)
    ...         if target_image is None:
    ...             # Fallback if target image is not provided
    ...             return {"target_image": None}
    ...         return {"target_image": target_image}
    ...
    ...     def apply(
    ...         self,
    ...         img: np.ndarray,
    ...         target_image: np.ndarray | None = None,
    ...         **params
    ...     ) -> np.ndarray:
    ...         if target_image is None:
    ...             return img
    ...
    ...         # Simple color transfer implementation
    ...         # Calculate mean and std of source and target images
    ...         src_mean = np.mean(img, axis=(0, 1))
    ...         src_std = np.std(img, axis=(0, 1))
    ...         tgt_mean = np.mean(target_image, axis=(0, 1))
    ...         tgt_std = np.std(target_image, axis=(0, 1))
    ...
    ...         # Normalize source image
    ...         normalized = (img - src_mean) / (src_std + 1e-7)
    ...
    ...         # Scale by target statistics and blend with original
    ...         transformed = normalized * tgt_std + tgt_mean
    ...         transformed = np.clip(transformed, 0, 255).astype(np.uint8)
    ...
    ...         # Blend the result based on intensity
    ...         result = cv2.addWeighted(img, 1 - self.intensity, transformed, self.intensity, 0)
    ...         return result
    >>>
    >>> # Usage example with a target image from a different domain
    >>> source_image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    >>> target_image = np.random.randint(100, 200, (200, 200, 3), dtype=np.uint8)  # Different domain image
    >>>
    >>> # Create the transform with the pipeline
    >>> transform = A.Compose([
    ...     SimpleColorTransfer(intensity=0.7, reference_key="target_img", p=1.0),
    ... ])
    >>>
    >>> # Apply the transform with the target image passed in the data dictionary
    >>> result = transform(image=source_image, target_img=target_image)
    >>> adapted_image = result["image"]  # Image with characteristics transferred from target domain

    """

    InitSchema: type[BaseDomainAdaptationInitSchema]

    def __init__(
        self,
        reference_images: Sequence[Any] | None,
        read_fn: Callable[[Any], np.ndarray] | None,
        metadata_key: str,
        p: float,
    ):
        super().__init__(p=p)
        self.reference_images = reference_images
        self.read_fn = read_fn
        self.metadata_key = metadata_key

    @property
    def targets_as_params(self) -> list[str]:
        return [self.metadata_key]

    def _get_reference_image(self, data: dict[str, Any]) -> np.ndarray:
        """Retrieves the reference image from metadata or deprecated arguments."""
        reference_image = None

        if metadata_images := data.get(self.metadata_key):
            if not isinstance(metadata_images, Sequence) or not metadata_images:
                raise ValueError(
                    f"Metadata key '{self.metadata_key}' should contain a non-empty sequence of numpy arrays.",
                )
            if not isinstance(metadata_images[0], np.ndarray):
                raise ValueError(f"Images in metadata key '{self.metadata_key}' should be numpy arrays.")
            reference_image = self.py_random.choice(metadata_images)

            if self.reference_images is not None:
                warnings.warn(
                    "Both 'reference_images' (deprecated constructor argument) and metadata via "
                    f"'{self.metadata_key}' were provided. Prioritizing metadata.",
                    UserWarning,
                    stacklevel=2,
                )
        elif self.reference_images is not None:
            if self.read_fn is None:
                msg = "read_fn cannot be None when using the deprecated 'reference_images' argument."
                raise ValueError(msg)
            ref_source = self.py_random.choice(self.reference_images)
            reference_image = self.read_fn(ref_source)
        else:
            raise ValueError(
                f"{self.__class__.__name__} requires reference images. Provide them via the `metadata_key` "
                f"'{self.metadata_key}' in the input data, or use the deprecated 'reference_images' argument.",
            )

        if reference_image is None:
            msg = "Could not obtain a reference image."
            raise RuntimeError(msg)

        return reference_image

    def to_dict_private(self) -> dict[str, Any]:
        """Convert the transform to a dictionary for serialization.

        Raises:
            NotImplementedError: Domain adaptation transforms cannot be reliably serialized
                when using metadata key or deprecated arguments.

        """
        if self.reference_images is not None:
            msg = (
                f"{self.__class__.__name__} cannot be reliably serialized when using the deprecated "
                "'reference_images'."
            )
            raise NotImplementedError(msg)
        msg = f"{self.__class__.__name__} cannot be reliably serialized due to its dependency on external data via metadata."
        raise NotImplementedError(msg)


class HistogramMatching(BaseDomainAdaptation):
    """Adjust the pixel value distribution of an input image to match a reference image.

This transform modifies the pixel intensities of the input image so that its histogram
matches the histogram of a provided reference image. This process is applied independently
to each channel of the image if it is multi-channel.

Why use Histogram Matching?

**Domain Adaptation:** Helps bridge the gap between images from different sources
(e.g., different cameras, lighting conditions, synthetic vs. real data) by aligning
their overall intensity and contrast characteristics.

*Use Case Example:* Imagine you have labeled training images from one source (e.g., daytime photos,
medical scans from Hospital A) but expect your model to work on images from a different
source at test time (e.g., nighttime photos, scans from Hospital B). You might only have
unlabeled images from the target (test) domain. HistogramMatching can be used to make your
labeled training images resemble the *style* (intensity and contrast distribution) of the
unlabeled target images. By training on these adapted images, your model may generalize
better to the target domain without needing labels for it.

How it works:
The core idea is to map the pixel values of the input image such that its cumulative
distribution function (CDF) matches the CDF of the reference image. This effectively
reshapes the input image's histogram to resemble the reference's histogram.
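
    A minimal single-channel sketch of this idea (illustrative only; the actual implementation
    lives in `apply_histogram`). Assumes `src` and `ref` are 2D uint8 arrays:

        >>> src_cdf = np.cumsum(np.bincount(src.ravel(), minlength=256)) / src.size
        >>> ref_cdf = np.cumsum(np.bincount(ref.ravel(), minlength=256)) / ref.size
        >>> lut = np.interp(src_cdf, ref_cdf, np.arange(256))  # map source CDF onto reference CDF
        >>> matched = lut[src].astype(np.uint8)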

Args:
    metadata_key (str): Key in the input `data` dictionary to retrieve the reference image(s).
        The value should be a sequence (e.g., list) of numpy arrays (pre-loaded images).
        Default: "hm_metadata".
    blend_ratio (tuple[float, float]): Range for the blending factor between the original
        and the histogram-matched image. A value of 0 means the original image is returned,
        1 means the fully matched image is returned. A random value within this range [min, max]
        is sampled for each application. This allows for varying degrees of adaptation.
        Default: (0.5, 1.0).
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - Requires at least one reference image to be provided via the `metadata_key` argument.
    - The `reference_images` and `read_fn` constructor arguments are deprecated.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create sample images for demonstration
    >>> # Source image: dark image with low contrast
    >>> source_image = np.ones((100, 100, 3), dtype=np.uint8) * 50  # Dark gray image
    >>> source_image[30:70, 30:70] = 100  # Add slightly brighter square in center
    >>>
    >>> # Target image: higher brightness and contrast
    >>> target_image = np.ones((100, 100, 3), dtype=np.uint8) * 150  # Bright image
    >>> target_image[20:80, 20:80] = 200  # Add even brighter square
    >>>
    >>> # Initialize the histogram matching transform with custom settings
    >>> transform = A.Compose([
    ...     A.HistogramMatching(
    ...         blend_ratio=(0.7, 0.9),  # Control the strength of histogram matching
    ...         metadata_key="reference_imgs",  # Custom metadata key
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> # Apply the transform
    >>> result = transform(
    ...     image=source_image,
    ...     reference_imgs=[target_image]  # Pass reference image via metadata key
    ... )
    >>>
    >>> # Get the histogram-matched image
    >>> matched_image = result["image"]
    >>>
    >>> # The matched_image will have brightness and contrast similar to target_image
    >>> # while preserving the content of source_image
    >>>
    >>> # Multiple reference images can be provided:
    >>> ref_imgs = [
    ...     target_image,
    ...     np.random.randint(100, 200, (100, 100, 3), dtype=np.uint8)  # Another reference image
    ... ]
    >>> multiple_refs_result = transform(image=source_image, reference_imgs=ref_imgs)
    >>> # A random reference image from the list will be chosen for each transform application

    References:
        Histogram Matching in scikit-image:
            https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_histogram_matching.html

    """

    class InitSchema(BaseDomainAdaptationInitSchema):
        blend_ratio: Annotated[
            tuple[float, float],
            AfterValidator(nondecreasing),
            AfterValidator(check_range_bounds(0, 1)),
        ]

    def __init__(
        self,
        reference_images: Sequence[Any] | None = None,
        blend_ratio: tuple[float, float] = (0.5, 1.0),
        read_fn: Callable[[Any], np.ndarray] | None = read_rgb_image,
        metadata_key: str = "hm_metadata",
        p: float = 0.5,
    ):
        super().__init__(reference_images=reference_images, read_fn=read_fn, metadata_key=metadata_key, p=p)
        self.blend_ratio = blend_ratio

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the transform based on input data.

        Args:
            params (dict[str, Any]): Parameters from the previous transform in the pipeline
            data (dict[str, Any]): Input data dictionary containing the image and metadata

        Returns:
            dict[str, Any]: Dictionary containing the reference image and blend ratio

        """
        reference_image = self._get_reference_image(data)
        return {
            "reference_image": reference_image,
            "blend_ratio": self.py_random.uniform(*self.blend_ratio),
        }

    def apply(
        self,
        img: np.ndarray,
        reference_image: np.ndarray,
        blend_ratio: float,
        **params: Any,
    ) -> np.ndarray:
        """Apply histogram matching to the input image.

        Args:
            img (np.ndarray): Input image to be transformed
            reference_image (np.ndarray): Reference image for histogram matching
            blend_ratio (float): Blending factor between the original and matched image
            **params (Any): Additional parameters

        Returns:
            np.ndarray: Transformed image with histogram matched to the reference image

        """
        return apply_histogram(img, reference_image, blend_ratio)


class FDA(BaseDomainAdaptation):
    """Fourier Domain Adaptation (FDA).

Adapts the style of the input image to match the style of a reference image
by manipulating their frequency components in the Fourier domain. This is
particularly useful for unsupervised domain adaptation (UDA).

Why use FDA?

**Domain Adaptation:** FDA helps bridge the domain gap between source and target
datasets (e.g., synthetic vs. real, day vs. night) by aligning their low-frequency
Fourier spectrum components. This can improve model performance on the target domain
without requiring target labels.

*Use Case Example:* Imagine you have labeled training data acquired under certain conditions
(e.g., images from Hospital A using a specific scanner) but need your model to perform well
on data from a different distribution (e.g., unlabeled images from Hospital B with a different scanner).
FDA can adapt the labeled source images to match the *style* (frequency characteristics)
of the unlabeled target images, potentially improving the model's generalization to the
target domain at test time.

How it works:
FDA operates in the frequency domain. It replaces the low-frequency components
of the source image's Fourier transform with the low-frequency components from the
reference (target domain) image's Fourier transform. The `beta_limit` parameter
controls the size of the frequency window being swapped.
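
    A minimal single-channel sketch of this idea (illustrative only; the actual implementation
    lives in `fourier_domain_adaptation`). Assumes `src` and `trg` are 2D float arrays of the
    same shape and `beta` is a small value in [0, 0.5]:

        >>> fft_src = np.fft.fftshift(np.fft.fft2(src))
        >>> fft_trg = np.fft.fftshift(np.fft.fft2(trg))
        >>> amp_src, pha_src = np.abs(fft_src), np.angle(fft_src)
        >>> h, w = src.shape
        >>> b = int(min(h, w) * beta)  # half-size of the centered low-frequency window
        >>> c_h, c_w = h // 2, w // 2
        >>> amp_src[c_h - b:c_h + b, c_w - b:c_w + b] = np.abs(fft_trg)[c_h - b:c_h + b, c_w - b:c_w + b]
        >>> adapted = np.real(np.fft.ifft2(np.fft.ifftshift(amp_src * np.exp(1j * pha_src))))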

Args:
    metadata_key (str): Key in the input `data` dictionary to retrieve the reference image(s).
        The value should be a sequence (e.g., list) of numpy arrays (pre-loaded images).
        Default: "fda_metadata".
    beta_limit (tuple[float, float] | float): Controls the extent of the low-frequency
        spectrum swap. A larger beta means more components are swapped. Corresponds to the L
        parameter in the original paper. Should be in the range [0, 0.5]. Sampling is uniform
        within the provided range [min, max]. Default: (0, 0.1).
    p (float): Probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - Requires at least one reference image to be provided via the `metadata_key` argument.
    - The `reference_images` and `read_fn` constructor arguments are deprecated.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create sample images for demonstration
    >>> # Source image: synthetic or simulated image (e.g., from a rendered game environment)
    >>> source_img = np.zeros((100, 100, 3), dtype=np.uint8)
    >>> # Create a pattern in the source image
    >>> source_img[20:80, 20:80, 0] = 200  # Red square
    >>> source_img[40:60, 40:60, 1] = 200  # Green inner square
    >>>
    >>> # Target domain image: real-world image with different texture/frequency characteristics
    >>> # For this example, we'll create an image with different frequency patterns
    >>> target_img = np.zeros((100, 100, 3), dtype=np.uint8)
    >>> for i in range(100):
    ...     for j in range(100):
    ...         # Create a high-frequency pattern
    ...         target_img[i, j, 0] = ((i + j) % 8) * 30
    ...         target_img[i, j, 1] = ((i - j) % 8) * 30
    ...         target_img[i, j, 2] = ((i * j) % 8) * 30
    >>>
    >>> # Example 1: FDA with minimal adaptation (small beta value)
    >>> # This will subtly adjust the frequency characteristics
    >>> minimal_fda = A.Compose([
    ...     A.FDA(
    ...         beta_limit=(0.01, 0.05),  # Small beta range for subtle adaptation
    ...         metadata_key="target_domain",  # Custom metadata key
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> # Apply the transform with minimal adaptation
    >>> minimal_result = minimal_fda(
    ...     image=source_img,
    ...     target_domain=[target_img]  # Pass reference image via custom metadata key
    ... )
    >>> minimal_adapted_img = minimal_result["image"]
    >>>
    >>> # Example 2: FDA with moderate adaptation (medium beta value)
    >>> moderate_fda = A.Compose([
    ...     A.FDA(
    ...         beta_limit=(0.1, 0.2),  # Medium beta range
    ...         metadata_key="target_domain",
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> moderate_result = moderate_fda(image=source_img, target_domain=[target_img])
    >>> moderate_adapted_img = moderate_result["image"]
    >>>
    >>> # Example 3: FDA with strong adaptation (larger beta value)
    >>> strong_fda = A.Compose([
    ...     A.FDA(
    ...         beta_limit=(0.3, 0.5),  # Larger beta range (upper limit is MAX_BETA_LIMIT)
    ...         metadata_key="target_domain",
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> strong_result = strong_fda(image=source_img, target_domain=[target_img])
    >>> strong_adapted_img = strong_result["image"]
    >>>
    >>> # Example 4: Using multiple target domain images
    >>> # Creating a list of target domain images with different characteristics
    >>> target_imgs = [target_img]
    >>>
    >>> # Add another target image with different pattern
    >>> another_target = np.zeros((100, 100, 3), dtype=np.uint8)
    >>> for i in range(100):
    ...     for j in range(100):
    ...         another_target[i, j, 0] = (i // 10) * 25
    ...         another_target[i, j, 1] = (j // 10) * 25
    ...         another_target[i, j, 2] = ((i + j) // 10) * 25
    >>> target_imgs.append(another_target)
    >>>
    >>> # Using default FDA settings with multiple target images
    >>> multi_target_fda = A.Compose([
    ...     A.FDA(p=1.0)  # Using default settings with default metadata_key="fda_metadata"
    ... ])
    >>>
    >>> # A random target image will be selected from the list for each application
    >>> multi_target_result = multi_target_fda(image=source_img, fda_metadata=target_imgs)
    >>> adapted_image = multi_target_result["image"]

    References:
        - FDA: https://github.com/YanchaoYang/FDA
        - FDA: https://openaccess.thecvf.com/content_CVPR_2020/papers/Yang_FDA_Fourier_Domain_Adaptation_for_Semantic_Segmentation_CVPR_2020_paper.pdf

    """

    class InitSchema(BaseDomainAdaptationInitSchema):
        beta_limit: ZeroOneRangeType

        @field_validator("beta_limit")
        @classmethod
        def _check_ranges(cls, value: tuple[float, float]) -> tuple[float, float]:
            bounds = 0, MAX_BETA_LIMIT
            if not bounds[0] <= value[0] <= value[1] <= bounds[1]:
                raise ValueError(f"Values should be in the range {bounds} got {value}")
            return value

    def __init__(
        self,
        reference_images: Sequence[Any] | None = None,
        beta_limit: tuple[float, float] | float = (0, 0.1),
        read_fn: Callable[[Any], np.ndarray] | None = read_rgb_image,
        metadata_key: str = "fda_metadata",
        p: float = 0.5,
    ):
        super().__init__(reference_images=reference_images, read_fn=read_fn, metadata_key=metadata_key, p=p)
        self.beta_limit = cast("tuple[float, float]", beta_limit)

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate parameters for the transform based on input data."""
        target_image = self._get_reference_image(data)
        height, width = params["shape"][:2]
        target_image_resized = cv2.resize(target_image, dsize=(width, height))
        return {"target_image": target_image_resized, "beta": self.py_random.uniform(*self.beta_limit)}

    def apply(
        self,
        img: np.ndarray,
        target_image: np.ndarray,
        beta: float,
        **params: Any,
    ) -> np.ndarray:
        """Apply Fourier Domain Adaptation to the input image.

        Args:
            img (np.ndarray): Input image to be transformed
            target_image (np.ndarray): Target domain image for adaptation
            beta (float): Coefficient controlling the extent of frequency component swapping
            **params (Any): Additional parameters

        Returns:
            np.ndarray: Transformed image with adapted frequency components

        """
        return fourier_domain_adaptation(img, target_image, beta)


class PixelDistributionAdaptation(BaseDomainAdaptation):
    """Adapts the pixel value distribution of an input image to match a reference image
    using statistical transformations (PCA, StandardScaler, or MinMaxScaler).

This transform aims to harmonize images from different domains by aligning their pixel-level
statistical properties.

Why use Pixel Distribution Adaptation?
**Domain Adaptation:** Useful for aligning images across domains with differing pixel statistics
(e.g., caused by different sensors, lighting, or post-processing).

*Use Case Example:* Consider having labeled data from Scanner A and needing the model to perform
well on unlabeled data from Scanner B, where images might have different overall brightness,
contrast, or color biases. This transform can adapt the labeled images from Scanner A to
mimic the pixel distribution *style* of the images from Scanner B, potentially improving
generalization without needing labels for Scanner B data.

How it works:
1. A chosen statistical transform (`transform_type`) is fitted to both the input (source) image
   and the reference (target) image separately.
2. The input image is transformed using the transform fitted on it (moving it to a standardized space).
3. The inverse transform *fitted on the reference image* is applied to the result from step 2
   (moving the standardized input into the reference image's statistical space).
4. The result is optionally blended with the original input image using `blend_ratio`.
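
    A minimal sketch of steps 1-4 for the "standard" transform type (illustrative only; the
    actual implementation lives in `adapt_pixel_distribution`). It uses scikit-learn's
    StandardScaler purely for illustration and assumes `img` and `ref` are H x W x 3 arrays
    and `weight` is the sampled blend ratio:

        >>> from sklearn.preprocessing import StandardScaler
        >>> src_pixels = img.reshape(-1, 3).astype(np.float32)
        >>> ref_pixels = ref.reshape(-1, 3).astype(np.float32)
        >>> src_scaler = StandardScaler().fit(src_pixels)   # fit on the source image
        >>> ref_scaler = StandardScaler().fit(ref_pixels)   # fit on the reference image
        >>> adapted = ref_scaler.inverse_transform(src_scaler.transform(src_pixels))
        >>> blended = ((1 - weight) * src_pixels + weight * adapted).reshape(img.shape)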

Args:
    metadata_key (str): Key in the input `data` dictionary to retrieve the reference image(s).
        The value should be a sequence (e.g., list) of numpy arrays (pre-loaded images).
        Default: "pda_metadata".
    blend_ratio (tuple[float, float]): Specifies the minimum and maximum blend ratio for mixing
        the adapted image with the original. A value of 0 means the original image is returned,
        1 means the fully adapted image is returned. A random value within this range [min, max]
        is sampled for each application. Default: (0.25, 1.0).
    transform_type (Literal["pca", "standard", "minmax"]): Specifies the type of statistical
        transformation to apply:
        - "pca": Principal Component Analysis.
        - "standard": StandardScaler (zero mean, unit variance).
        - "minmax": MinMaxScaler (scales to [0, 1] range).
        Default: "pca".
    p (float): The probability of applying the transform. Default: 0.5.

Targets:
    image

Image types:
    uint8, float32

Note:
    - Requires at least one reference image to be provided via the `metadata_key` argument.
    - The `reference_images` and `read_fn` constructor arguments are deprecated.

Examples:
    >>> import numpy as np
    >>> import albumentations as A
    >>> import cv2
    >>>
    >>> # Create sample images for demonstration
    >>> # Source image: simulated image from domain A (e.g., medical scan from one scanner)
    >>> source_image = np.random.normal(100, 20, (100, 100, 3)).clip(0, 255).astype(np.uint8)
    >>>
    >>> # Reference image: image from domain B with different statistical properties
    >>> # (e.g., scan from a different scanner with different intensity distribution)
    >>> reference_image = np.random.normal(150, 30, (100, 100, 3)).clip(0, 255).astype(np.uint8)
    >>>
    >>> # Example 1: Using PCA transformation (default)
    >>> pca_transform = A.Compose([
    ...     A.PixelDistributionAdaptation(
    ...         transform_type="pca",
    ...         blend_ratio=(0.8, 1.0),  # Strong adaptation
    ...         metadata_key="reference_images",
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> # Apply the transform with the reference image
    >>> pca_result = pca_transform(
    ...     image=source_image,
    ...     reference_images=[reference_image]
    ... )
    >>>
    >>> # Get the adapted image
    >>> pca_adapted_image = pca_result["image"]
    >>>
    >>> # Example 2: Using StandardScaler transformation
    >>> standard_transform = A.Compose([
    ...     A.PixelDistributionAdaptation(
    ...         transform_type="standard",
    ...         blend_ratio=(0.5, 0.7),  # Moderate adaptation
    ...         metadata_key="reference_images",
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> standard_result = standard_transform(
    ...     image=source_image,
    ...     reference_images=[reference_image]
    ... )
    >>> standard_adapted_image = standard_result["image"]
    >>>
    >>> # Example 3: Using MinMaxScaler transformation
    >>> minmax_transform = A.Compose([
    ...     A.PixelDistributionAdaptation(
    ...         transform_type="minmax",
    ...         blend_ratio=(0.3, 0.5),  # Subtle adaptation
    ...         metadata_key="reference_images",
    ...         p=1.0
    ...     )
    ... ])
    >>>
    >>> minmax_result = minmax_transform(
    ...     image=source_image,
    ...     reference_images=[reference_image]
    ... )
    >>> minmax_adapted_image = minmax_result["image"]
    >>>
    >>> # Example 4: Using multiple reference images
    >>> # When multiple reference images are provided, one is randomly selected for each transformation
    >>> multiple_references = [
    ...     reference_image,
    ...     np.random.normal(180, 25, (100, 100, 3)).clip(0, 255).astype(np.uint8),
    ...     np.random.normal(120, 40, (100, 100, 3)).clip(0, 255).astype(np.uint8)
    ... ]
    >>>
    >>> multi_ref_transform = A.Compose([
    ...     A.PixelDistributionAdaptation(p=1.0)  # Using default settings
    ... ])
    >>>
    >>> # Each time the transform is applied, it randomly selects one of the reference images
    >>> multi_ref_result = multi_ref_transform(
    ...     image=source_image,
    ...     pda_metadata=multiple_references  # Using the default metadata key
    ... )
    >>> adapted_image = multi_ref_result["image"]

    References:
        Qudida: https://github.com/arsenyinfo/qudida

    """

    class InitSchema(BaseDomainAdaptationInitSchema):
        blend_ratio: Annotated[
            tuple[float, float],
            AfterValidator(nondecreasing),
            AfterValidator(check_range_bounds(0, 1)),
        ]
        transform_type: Literal["pca", "standard", "minmax"]

    def __init__(
        self,
        reference_images: Sequence[Any] | None = None,
        blend_ratio: tuple[float, float] = (0.25, 1.0),
        read_fn: Callable[[Any], np.ndarray] | None = read_rgb_image,
        transform_type: Literal["pca", "standard", "minmax"] = "pca",
        metadata_key: str = "pda_metadata",
        p: float = 0.5,
    ):
        super().__init__(reference_images=reference_images, read_fn=read_fn, metadata_key=metadata_key, p=p)
        self.blend_ratio = blend_ratio
        self.transform_type = transform_type

    def get_params_dependent_on_data(
        self,
        params: dict[str, Any],
        data: dict[str, Any],
    ) -> dict[str, Any]:
        """Get parameters for the transform."""
        reference_image = self._get_reference_image(data)
        return {
            "reference_image": reference_image,
            "blend_ratio": self.py_random.uniform(*self.blend_ratio),
        }

    def apply(
        self,
        img: np.ndarray,
        reference_image: np.ndarray,
        blend_ratio: float,
        **params: Any,
    ) -> np.ndarray:
        """Apply pixel distribution adaptation to the input image.

        Args:
            img (np.ndarray): Input image to be transformed
            reference_image (np.ndarray): Reference image for distribution adaptation
            blend_ratio (float): Blending factor between the original and adapted image
            **params (Any): Additional parameters

        Returns:
            np.ndarray: Transformed image with pixel distribution adapted to the reference image

        """
        return adapt_pixel_distribution(
            img,
            ref=reference_image,
            weight=blend_ratio,
            transform_type=self.transform_type,
        )