
Commit 99b8fff

fix bug
Signed-off-by: He, Xin3 <[email protected]>
1 parent 1d812a0 commit 99b8fff

File tree

1 file changed: +2 -2 lines changed
  • neural_compressor/torch/quantization


neural_compressor/torch/quantization/config.py

Lines changed: 2 additions & 2 deletions
@@ -85,8 +85,9 @@ class TorchBaseConfig(BaseConfig):

     # re-write func _get_op_name_op_type_config to fallback op_type with string
     # because there are some special op_types for IPEX backend: `Linear&Relu`, `Linear&add`, ...
-    def __init__(self, white_list):
+    def __init__(self, white_list=DEFAULT_WHITE_LIST):
         super().__init__(white_list)
+        self.params_list = self.__class__._generate_params_list()
         self.non_tunable_params: List[str] = ["white_list"]

     def _get_op_name_op_type_config(self):

@@ -966,7 +967,6 @@ def __init__(
             output_dir (str): The output directory for temporary files (default is "./temp_auto_round").
         """
         super().__init__(white_list=white_list)
-        self.params_list = self.__class__._generate_params_list()
        # these two params are lists but not tunable
         self.non_tunable_params.extend(["options", "shared_layers"])
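
As the diff reads, params_list was previously built only in a subclass __init__ (the AutoRound-related config around line 966), so any subclass that skipped that call would be missing the attribute. Hoisting the _generate_params_list() call into TorchBaseConfig.__init__, with white_list defaulted to DEFAULT_WHITE_LIST, guarantees every subclass gets params_list. Below is a minimal runnable sketch of that pattern; DEFAULT_WHITE_LIST's value, the _generate_params_list body, and AutoRoundLikeConfig are simplified stand-ins for illustration, not the real neural_compressor API.

    from typing import List

    DEFAULT_WHITE_LIST = ["*"]  # placeholder default, assumed for illustration


    class BaseConfig:
        def __init__(self, white_list=DEFAULT_WHITE_LIST):
            self.white_list = white_list

        @classmethod
        def _generate_params_list(cls) -> List[str]:
            # Stand-in: collect the tunable parameter names a class declares.
            return getattr(cls, "tunable_params", [])


    class TorchBaseConfig(BaseConfig):
        # After the fix: white_list has a default, and params_list is built
        # here, once, for every subclass.
        def __init__(self, white_list=DEFAULT_WHITE_LIST):
            super().__init__(white_list)
            self.params_list = self.__class__._generate_params_list()
            self.non_tunable_params: List[str] = ["white_list"]


    class AutoRoundLikeConfig(TorchBaseConfig):  # hypothetical subclass
        tunable_params = ["bits", "group_size"]

        def __init__(self, white_list=DEFAULT_WHITE_LIST):
            super().__init__(white_list=white_list)
            # No per-subclass params_list generation needed anymore; the base
            # class already did it. Only subclass-specific bookkeeping remains.
            self.non_tunable_params.extend(["options", "shared_layers"])


    cfg = AutoRoundLikeConfig()
    print(cfg.params_list)         # ['bits', 'group_size']
    print(cfg.non_tunable_params)  # ['white_list', 'options', 'shared_layers']

Because self.__class__ resolves to the most derived class at construction time, the single base-class call still picks up each subclass's own tunable parameters, which is why the duplicated line in the subclass could simply be deleted.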
