From dee416044a024534251e4a298070e3f37b614121 Mon Sep 17 00:00:00 2001
From: kunal-vaishnavi <115581922+kunal-vaishnavi@users.noreply.github.com>
Date: Thu, 20 Feb 2025 17:45:13 -0800
Subject: [PATCH] Fix default value for INT4 accuracy level (#1265)

### Description

This PR sets the INT4 accuracy level to 4 by default for the CPU EP and 0 for non-CPU EPs.

### Motivation and Context

This PR ensures that any model created for the CPU EP receives the best performance. It also helps resolve [this issue](https://github.com/microsoft/onnxruntime-genai/issues/1098).
---
 src/python/py/models/builder.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/python/py/models/builder.py b/src/python/py/models/builder.py
index 83cbf59f5..341a43a3c 100644
--- a/src/python/py/models/builder.py
+++ b/src/python/py/models/builder.py
@@ -295,7 +295,7 @@ def __init__(self, config, io_dtype, onnx_dtype, ep, cache_dir, extra_options):
         # Quantization-specific variables (INT4, INT8, etc.)
         self.quant_attrs = {
             "int4": {
-                "accuracy_level": int(extra_options.get("int4_accuracy_level", 0)),  # Default is 0 for non-QDQ formats, default is 4 for QDQ formats
+                "accuracy_level": int(extra_options.get("int4_accuracy_level", 4 if self.ep == "cpu" else 0)),  # Default is 4 for the CPU EP and 0 for non-CPU EPs
                 "block_size": int(extra_options.get("int4_block_size", 32)),
                 "is_symmetric": extra_options.get("int4_is_symmetric", True),
                 "op_types_to_quantize": extra_options.get("int4_op_types_to_quantize", ("MatMul", )),
@@ -3324,6 +3324,7 @@ def get_args():
                     3 is bf16.
                     2 is fp16.
                     1 is fp32.
+                    Default is 4 for the CPU EP and 0 for non-CPU EPs.
                 int4_block_size = 16/32/64/128/256: Specify the block_size for int4 quantization.
                 int4_is_symmetric = Quantize the weights symmetrically. Default is true.
                     If true, quantization is done to int4. If false, quantization is done to uint4.
@@ -3354,7 +3355,7 @@ def get_args():
                     If enabled, all nodes being placed on the CUDA EP is the prerequisite for the CUDA graph to be used correctly.
                     It is not guaranteed that CUDA graph be enabled as it depends on the model and the graph structure.
                 use_8bits_moe = Use 8-bit quantization for MoE layers. Default is false.
-                    If true, the QMoE op will use 4-bit quantization. If false, the QMoE op will use 8-bits quantization.
+                    If true, the QMoE op will use 8-bit quantization. If false, the QMoE op will use 4-bit quantization.
                 use_qdq = Use the QDQ decomposition for ops. Use this option when you want to use quantize-dequantize ops.
                     For example, you will have a quantized MatMul op instead of the MatMulNBits op.
                 adapter_path = Path to folder on disk containing the adapter files (adapter_config.json and adapter model weights).
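
For reference, a minimal sketch of how the new default resolves, mirroring the one-line change in the first hunk (the standalone helper is hypothetical; builder.py evaluates this expression inline inside `__init__`):

```python
# Hypothetical helper mirroring the patched expression in builder.py.
def resolve_int4_accuracy_level(extra_options: dict, ep: str) -> int:
    # 4 selects the int8 compute path (fastest for MatMulNBits on CPU);
    # 0 leaves the accuracy level unset so non-CPU EPs pick their own precision.
    return int(extra_options.get("int4_accuracy_level", 4 if ep == "cpu" else 0))

assert resolve_int4_accuracy_level({}, "cpu") == 4    # new CPU EP default
assert resolve_int4_accuracy_level({}, "cuda") == 0   # non-CPU EPs keep 0
assert resolve_int4_accuracy_level({"int4_accuracy_level": "1"}, "cpu") == 1  # explicit value wins
```

Passing `int4_accuracy_level=<level>` via the builder's `--extra_options` flag still overrides the default, so existing workflows that set the level explicitly are unaffected.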