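# PEP 517 build configuration: the package is built with setuptools.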
[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "0.4.5"
authors = [
    {name="EleutherAI", email="contact@eleuther.ai"}
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.8"
license = { "text" = "MIT" }
dependencies = [
    "accelerate==1.0.1",
    "evaluate==0.4.3",
    "datasets==3.1.0",
    "jsonlines==4.0.0",
    "numexpr==2.10.1",
    "peft==0.13.2",
    "pybind11==2.13.6",
    "pytablewriter==1.2.0",
    "rouge-score==0.1.2",
    "sacrebleu==2.4.3",
    "scikit-learn==1.5.2",
    "sqlitedict==2.1.0",
    "torch==2.5.1",
    "tqdm-multiprocess==0.0.11",
    "transformers==4.45.2",
    "zstandard==0.23.0",
    "dill==0.3.8",
    "word2number==1.1",
    "more_itertools==10.5.0",
    "unitxt==1.15.7",
    "jiwer==3.0.5",
]
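
# Limit package discovery to the lm_eval package tree.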
[tool.setuptools.packages.find]
include = ["lm_eval*"]

# Required to include yaml files in the pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]
[project.scripts]
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"
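
# Optional extras, installable with e.g. `pip install lm_eval[api]`.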
[project.optional-dependencies]
api = ["requests==2.32.3", "aiohttp==3.10.10", "tenacity==9.0.0", "tqdm==4.66.6", "tiktoken==0.8.0"]
dev = ["pytest==8.3.3", "pytest-cov==6.0.0", "pytest-xdist==3.6.1", "pre-commit==4.0.1", "mypy==1.13.0"]
deepsparse = ["deepsparse-nightly[llm]>=1.8.0.20240404"]
gptq = ["auto-gptq[triton]>=0.6.0"]
hf_transfer = ["hf_transfer"]
ifeval = ["langdetect", "immutabledict", "nltk==3.9.1"]
neuronx = ["optimum[neuronx]"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
math = ["sympy==1.13.1", "antlr4-python3-runtime==4.11"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
sentencepiece = ["sentencepiece==0.2.0"]
sparseml = ["sparseml-nightly[llm]>=1.8.0.20240404"]
testing = ["pytest==8.3.3", "pytest-cov==6.0.0", "pytest-xdist==3.6.1"]
vllm = ["vllm>=0.4.2"]
zeno = ["pandas==2.2.3", "zeno-client"]
wandb = ["wandb>=0.16.3", "pandas==2.2.3", "numpy==2.1.2"]
ibm_watsonx_ai = ["ibm-watsonx-ai==1.1.23"]
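# Aggregate extra that pulls in most of the extras above.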
all = [
    "lm_eval[api]",
    "lm_eval[dev]",
    "lm_eval[deepsparse]",
    "lm_eval[gptq]",
    "lm_eval[hf_transfer]",
    "lm_eval[ifeval]",
    "lm_eval[mamba]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[promptsource]",
    "lm_eval[sentencepiece]",
    "lm_eval[sparseml]",
    "lm_eval[testing]",
    "lm_eval[vllm]",
    "lm_eval[zeno]",
    "lm_eval[wandb]",
    "lm_eval[ibm_watsonx_ai]",
]
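
# Ruff lint settings; "I" enables isort-style import-sorting rules.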
[tool.ruff.lint]
extend-select = ["I"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["lm_eval"]

[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401", "F402", "F403"]
"utils.py" = ["F401"]