From 86941522a28b5029a5e808031cb2c0011224f043 Mon Sep 17 00:00:00 2001
From: "Wang, Chang"
Date: Wed, 24 Jul 2024 14:52:12 +0800
Subject: [PATCH] Update modeling_auto.py

Signed-off-by: Wang, Chang
---
 .../transformers/modeling/modeling_auto.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
index 77c8008b063..63540e11a74 100644
--- a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
+++ b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
@@ -1833,7 +1833,6 @@ def load_low_bit(cls, pretrained_model_name_or_path, *model_args, **kwargs):
             if quantization_config.weight_dtype not in [
                 "fp8_e5m2",
                 "fp8_e4m3",
-                "int4_fullrange"
             ]:
                 model = build_woq_model(model, quantization_config)
             else:
@@ -1950,7 +1949,6 @@ def replace_ipex_cpu_woq_linear(model, current_name=[]):
             if quantization_config.weight_dtype not in [
                 "fp8_e5m2",
                 "fp8_e4m3",
-                "int4_fullrange"
             ] and not quantization_config.use_ipex:
                 model = replace_linear(
                     model,