
import argparse
import logging
import sys

from .shape_inference import quant_pre_process

logger = logging.getLogger(__name__)


def parse_arguments():
    parser = argparse.ArgumentParser(
        description="""Model optimizer and shape inferencer, in preparation for quantization.
It consists of three optional steps:
1. Symbolic shape inference (best for transformer models).
2. Model optimization.
3. ONNX shape inference.

Model quantization with QDQ format, i.e. inserting QuantizeLinear/DeQuantizeLinear on
the tensor, requires tensor shape information to perform at its best. Currently, shape
inferencing works best on an optimized model. As a result, it is highly recommended to run
quantization on an optimized model with shape information. This is the tool for optimization
and shape inferencing.

Essentially this tool performs the following three (skippable) steps:

1. Symbolic shape inference.
2. Model optimization.
3. ONNX shape inference."""
    )
    parser.add_argument("--input", required=True, help="Path to the input model file")
    parser.add_argument("--output", required=True, help="Path to the output model file")
    # Note: argparse's type=bool treats any non-empty string (including "False") as True.
    parser.add_argument(
        "--skip_optimization", type=bool, default=False,
        help="Skip the model optimization step if true. It is a known issue that ORT optimization has "
        "difficulty with models larger than 2GB; rerun with this option to get around the issue.",
    )
    parser.add_argument(
        "--skip_onnx_shape", type=bool, default=False,
        help="Skip ONNX shape inference. Symbolic shape inference is most effective with transformer-based "
        "models. Skipping all shape inferences may reduce the effectiveness of quantization, as a tensor "
        "with unknown shape cannot be quantized.",
    )
    parser.add_argument(
        "--skip_symbolic_shape", type=bool, default=False,
        help="Skip symbolic shape inference. Symbolic shape inference is most effective with transformer-based "
        "models. Skipping all shape inferences may reduce the effectiveness of quantization, as a tensor "
        "with unknown shape cannot be quantized.",
    )
    parser.add_argument(
        "--auto_merge", action="store_true", default=False,
        help="Automatically merge symbolic dims when a conflict happens",
    )
    parser.add_argument(
        "--int_max", type=int, default=2**31 - 1,
        help="Maximum value for an integer to be treated as boundless, for ops like Slice",
    )
    parser.add_argument(
        "--guess_output_rank", action="store_true", default=False,
        help="Guess output rank to be the same as input 0 for unknown ops",
    )
    parser.add_argument(
        "--verbose", type=int, default=0,
        help="Prints detailed logs of inference. 0: turn off, 1: warnings, 3: detailed",
    )
    parser.add_argument(
        "--save_as_external_data", action="store_true", default=False,
        help="Save the ONNX model to external data",
    )
    parser.add_argument(
        "--all_tensors_to_one_file", action="store_true", default=False,
        help="Save all the external data to one file",
    )
    parser.add_argument(
        "--external_data_location", default="./",
        help="The file location to save the external file",
    )
    parser.add_argument(
        "--external_data_size_threshold", type=int, default=1024,
        help="The size threshold for external data",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()

    if args.skip_optimization and args.skip_onnx_shape and args.skip_symbolic_shape:
        logger.error("Skipping all three steps, nothing to be done. Quitting...")
        sys.exit()

    if not args.skip_optimization and args.save_as_external_data:
        logger.error("ORT model optimization does not support external data yet!")
        sys.exit()

    logger.info("input model: %s", args.input)
    logger.info("output model: %s", args.output)

    quant_pre_process(
        args.input,
        args.output,
        args.skip_optimization,
        args.skip_onnx_shape,
        args.skip_symbolic_shape,
        args.auto_merge,
        args.int_max,
        args.guess_output_rank,
        args.verbose,
        args.save_as_external_data,
        args.all_tensors_to_one_file,
        args.external_data_location,
        args.external_data_size_threshold,
    )