diff --git a/src/python/py/models/builder.py b/src/python/py/models/builder.py index 83cbf59f5..341a43a3c 100644 --- a/src/python/py/models/builder.py +++ b/src/python/py/models/builder.py @@ -295,7 +295,7 @@ def __init__(self, config, io_dtype, onnx_dtype, ep, cache_dir, extra_options): # Quantization-specific variables (INT4, INT8, etc.) self.quant_attrs = { "int4": { - "accuracy_level": int(extra_options.get("int4_accuracy_level", 0)), # Default is 0 for non-QDQ formats, default is 4 for QDQ formats + "accuracy_level": int(extra_options.get("int4_accuracy_level", 4 if self.ep == "cpu" else 0)), # Default is 4 for the CPU EP and 0 for non-CPU EPs "block_size": int(extra_options.get("int4_block_size", 32)), "is_symmetric": extra_options.get("int4_is_symmetric", True), "op_types_to_quantize": extra_options.get("int4_op_types_to_quantize", ("MatMul", )), @@ -3324,6 +3324,7 @@ def get_args(): 3 is bf16. 2 is fp16. 1 is fp32. + Default is 4 for the CPU EP and 0 for non-CPU EPs. int4_block_size = 16/32/64/128/256: Specify the block_size for int4 quantization. int4_is_symmetric = Quantize the weights symmetrically. Default is true. If true, quantization is done to int4. If false, quantization is done to uint4. @@ -3354,7 +3355,7 @@ def get_args(): If enabled, all nodes being placed on the CUDA EP is the prerequisite for the CUDA graph to be used correctly. It is not guaranteed that CUDA graph be enabled as it depends on the model and the graph structure. use_8bits_moe = Use 8-bit quantization for MoE layers. Default is false. - If true, the QMoE op will use 4-bit quantization. If false, the QMoE op will use 8-bits quantization. + If true, the QMoE op will use 8-bit quantization. If false, the QMoE op will use 4-bit quantization. use_qdq = Use the QDQ decomposition for ops. Use this option when you want to use quantize-dequantize ops. For example, you will have a quantized MatMul op instead of the MatMulNBits op. 
adapter_path = Path to folder on disk containing the adapter files (adapter_config.json and adapter model weights).