From 317b9133ccde44d82c99c4b312cf8e3e4afc925f Mon Sep 17 00:00:00 2001
From: "Wang, Chang"
Date: Tue, 2 Jul 2024 13:13:46 +0800
Subject: [PATCH 1/2] Clean INC import (#1645)

Signed-off-by: changwangss
---
 .../transformers/config.py                   |   2 +-
 .../transformers/utils/utility_tf.py         | 107 ------------------
 2 files changed, 1 insertion(+), 108 deletions(-)
 delete mode 100644 intel_extension_for_transformers/transformers/utils/utility_tf.py

diff --git a/intel_extension_for_transformers/transformers/config.py b/intel_extension_for_transformers/transformers/config.py
index a0009e7d3ed..f5918267491 100644
--- a/intel_extension_for_transformers/transformers/config.py
+++ b/intel_extension_for_transformers/transformers/config.py
@@ -19,7 +19,7 @@
 import yaml
 
 from enum import Enum
-from neural_compressor.conf.dotdict import DotDict
+from neural_compressor.utils.utility import DotDict
 from .utils.metrics import Metric
 from .utils.objectives import Objective, performance
 
diff --git a/intel_extension_for_transformers/transformers/utils/utility_tf.py b/intel_extension_for_transformers/transformers/utils/utility_tf.py
deleted file mode 100644
index f19785740af..00000000000
--- a/intel_extension_for_transformers/transformers/utils/utility_tf.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Utils for tensorflow framework."""
-
-import os
-import json
-from collections import OrderedDict, UserDict
-from neural_compressor.experimental import common
-
-TMPPATH = os.path.join('tmp', 'model')
-TEACHERPATH = os.path.join('tmp', 'teacher_model')
-class TFDataloader(object):
-    """Tensorflow dataloader.
-
-    Args:
-        dataset (string): Dataset
-    """
-
-    def __init__(self, dataset, batch_size=None):
-        """Init an instance."""
-        self.dataset = dataset
-        self.batch_size = batch_size
-
-    def __iter__(self):
-        """Get the iteration of dataset."""
-        for inputs, labels in self.dataset:
-            if isinstance(inputs, dict) or isinstance(inputs, OrderedDict) \
-                or isinstance(inputs, UserDict):
-                for name in inputs.keys():
-                    inputs[name] = inputs[name].numpy()
-            elif isinstance(inputs, list) or isinstance(inputs, tuple):
-                inputs = [input.numpy() for input in inputs]
-            else:
-                inputs = inputs.numpy()
-
-            if isinstance(labels, dict) or isinstance(labels, OrderedDict) \
-                or isinstance(labels, UserDict):  # pragma: no cover
-                for name in labels.keys():
-                    labels[name] = labels[name].numpy()
-            elif isinstance(labels, list) or isinstance(labels, tuple):
-                labels = [label.numpy() for label in labels]
-            else:
-                labels = labels.numpy()
-            yield inputs, labels
-
-    def __len__(self):
-        """Return the length of dataset."""
-        return len(self.dataset)
-
-
-def distributed_init(worker_addresses, type='worker', index=0):
-    """Init distribute environment.
-
-    Args:
-        worker_addresses: Addresses of all nodes.
-        type: The type of node, such as worker.
-        index: When index is 0, the node treat as a chief.
-    """
-    tf_config = {
-        'cluster': {
-            'worker': worker_addresses
-        },
-        'task': {'type': type, 'index': index}
-    }
-    os.environ['TF_CONFIG'] = json.dumps(tf_config)
-
-def _is_chief(task_type, task_id):
-    # here only consider the case in which TF_CONFIG task_type is set as worker
-    # and task_id=0 represents the chief
-    return (task_type == 'worker' and task_id == 0)
-
-# get model folder path for the distributed environment
-def get_filepath(base_dirpath, task_type, task_id):
-    """Get model folder path for the distributed environment.
-
-    Args:
-        base_dirpath: The basic folder path.
-        task_type: Task_type is set as worker.
-        task_id: Task id. When task_id=0, the node treat as a chief.
-    """
-    if task_type is None:  # single node
-        return base_dirpath
-    elif _is_chief(task_type, task_id):
-        return os.path.join(base_dirpath, 'chief')
-    else:
-        return os.path.join(base_dirpath, 'worker_' + str(task_id))
-
-
-# convert a Keras model to SavedModel
-def keras2SavedModel(model):  # pragma: no cover
-    """Transfer keras model into save_model."""
-    model = common.Model(model)
-    return model.model
From 86087dc4a1d0ed74c1360c7906cd4eae9a59704e Mon Sep 17 00:00:00 2001
From: "Wang, Chang"
Date: Tue, 2 Jul 2024 17:29:30 +0800
Subject: [PATCH 2/2] Set lm-eval to 0.4.2 (#1647)

Signed-off-by: changwangss
---
 .github/workflows/script/formatScan/pylint.sh                 | 2 +-
 .../pytorch/language-modeling/inference/requirements.txt      | 2 +-
 .../pytorch/language-modeling/pruning/requirements.txt        | 2 +-
 .../pytorch/language-modeling/quantization/requirements.txt   | 2 +-
 .../huggingface/pytorch/text2text-generation/requirements.txt | 2 +-
 examples/modelscope/requirements.txt                          | 2 +-
 .../neural_chat/requirements_cpu.txt                          | 2 +-
 .../neural_chat/requirements_hpu.txt                          | 2 +-
 .../neural_chat/requirements_win.txt                          | 2 +-
 .../neural_chat/tests/requirements.txt                        | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/script/formatScan/pylint.sh b/.github/workflows/script/formatScan/pylint.sh
index eeb71beb604..41e22c470b9 100644
--- a/.github/workflows/script/formatScan/pylint.sh
+++ b/.github/workflows/script/formatScan/pylint.sh
@@ -28,7 +28,7 @@ else
     echo "Not found requirements.txt file."
 fi
 # install packages
-pip install lm-eval
+pip install lm-eval==0.4.2
 pip install accelerate nlpaug nltk schema optimum-intel optimum peft
 pip install --upgrade --force-reinstall transformers==4.36.2
 pip install optimum-habana
diff --git a/examples/huggingface/pytorch/language-modeling/inference/requirements.txt b/examples/huggingface/pytorch/language-modeling/inference/requirements.txt
index e87bc861ca8..cd6cd604899 100644
--- a/examples/huggingface/pytorch/language-modeling/inference/requirements.txt
+++ b/examples/huggingface/pytorch/language-modeling/inference/requirements.txt
@@ -1,4 +1,4 @@
 transformers
 accelerate
 sentencepiece != 0.1.92
-lm-eval
+lm-eval==0.4.2
diff --git a/examples/huggingface/pytorch/language-modeling/pruning/requirements.txt b/examples/huggingface/pytorch/language-modeling/pruning/requirements.txt
index b60bac56d76..a1ea63132a8 100644
--- a/examples/huggingface/pytorch/language-modeling/pruning/requirements.txt
+++ b/examples/huggingface/pytorch/language-modeling/pruning/requirements.txt
@@ -7,5 +7,5 @@ transformers
 torch==2.0.1
 tqdm
 neural_compressor
-lm-eval
+lm-eval==0.4.2
 
diff --git a/examples/huggingface/pytorch/language-modeling/quantization/requirements.txt b/examples/huggingface/pytorch/language-modeling/quantization/requirements.txt
index c7b5b6fcf83..36ee5a1b55a 100644
--- a/examples/huggingface/pytorch/language-modeling/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/language-modeling/quantization/requirements.txt
@@ -9,5 +9,5 @@ wandb
 einops
 neural-compressor
 pytest==8.0.0
-lm-eval
+lm-eval==0.4.2
 git+https://github.com/huggingface/peft.git@6c44096c7b8d55a2ecf24be9bc68393467e1584a
diff --git a/examples/huggingface/pytorch/text2text-generation/requirements.txt b/examples/huggingface/pytorch/text2text-generation/requirements.txt
index 8a585f9fd9e..73e4ae2e655 100644
--- a/examples/huggingface/pytorch/text2text-generation/requirements.txt
+++ b/examples/huggingface/pytorch/text2text-generation/requirements.txt
@@ -11,4 +11,4 @@ neural-compressor
 optimum-intel > 1.12.0
 onnxruntime
 intel-extension-for-pytorch
-lm-eval
+lm-eval==0.4.2
diff --git a/examples/modelscope/requirements.txt b/examples/modelscope/requirements.txt
index bc7a3e65de6..b04bd189db0 100644
--- a/examples/modelscope/requirements.txt
+++ b/examples/modelscope/requirements.txt
@@ -1,6 +1,6 @@
 intel_extension_for_transformers
 neural-speed
-lm-eval
+lm-eval==0.4.2
 sentencepiece
 gguf
 --extra-index-url https://download.pytorch.org/whl/cpu
diff --git a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt
index 6097d2e2a0d..7b38113697b 100644
--- a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt
+++ b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt
@@ -7,7 +7,7 @@ fastapi
 fschat==0.2.32
 huggingface_hub
 intel_extension_for_pytorch==2.3.0
-lm-eval
+lm-eval==0.4.2
 neural-compressor
 neural_speed==1.0a0
 numpy==1.23.5
diff --git a/intel_extension_for_transformers/neural_chat/requirements_hpu.txt b/intel_extension_for_transformers/neural_chat/requirements_hpu.txt
index 1c6dfa0d47a..f3983b6d3c5 100644
--- a/intel_extension_for_transformers/neural_chat/requirements_hpu.txt
+++ b/intel_extension_for_transformers/neural_chat/requirements_hpu.txt
@@ -4,7 +4,7 @@ evaluate
 fastapi
 fschat==0.2.35
 huggingface_hub
-lm-eval
+lm-eval==0.4.2
 neural-compressor
 numpy==1.23.5
 optimum
diff --git a/intel_extension_for_transformers/neural_chat/requirements_win.txt b/intel_extension_for_transformers/neural_chat/requirements_win.txt
index c417c5ca01a..56ac6027ab4 100644
--- a/intel_extension_for_transformers/neural_chat/requirements_win.txt
+++ b/intel_extension_for_transformers/neural_chat/requirements_win.txt
@@ -6,7 +6,7 @@ fastapi
 fschat==0.2.35
 huggingface_hub
 intel-extension-for-transformers
-lm-eval
+lm-eval==0.4.2
 neural-compressor
 numpy==1.23.5
 optimum
diff --git a/intel_extension_for_transformers/neural_chat/tests/requirements.txt b/intel_extension_for_transformers/neural_chat/tests/requirements.txt
index a4243865087..97a46d2e502 100644
--- a/intel_extension_for_transformers/neural_chat/tests/requirements.txt
+++ b/intel_extension_for_transformers/neural_chat/tests/requirements.txt
@@ -38,7 +38,7 @@ langchain-community==0.0.27
 langchain_core==0.1.35
 langid
 librosa
-lm-eval
+lm-eval==0.4.2
 markdown
 neural-compressor
 neural_speed==1.0a0
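
Note on patch 2: every open-ended lm-eval requirement becomes the hard pin
lm-eval==0.4.2, keeping CI and all examples on one known-good version of the
evaluation harness; releases after 0.4.2 reworked parts of the evaluator
interface, so an unpinned install could break these builds with no change on
this side (that motivation is inferred from the pin itself, not stated in the
commit message). A minimal sketch of exercising the pinned harness; the
checkpoint and task are placeholders:

    from importlib.metadata import version

    import lm_eval

    # Fail fast if the environment resolved a different harness version.
    assert version("lm_eval") == "0.4.2", version("lm_eval")

    # simple_evaluate() is the lm-eval 0.4.x programmatic entry point.
    results = lm_eval.simple_evaluate(
        model="hf",                                 # Hugging Face backend
        model_args="pretrained=facebook/opt-125m",  # placeholder checkpoint
        tasks=["lambada_openai"],                   # placeholder task
        batch_size=8,
    )
    print(results["results"])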