-
Notifications
You must be signed in to change notification settings - Fork 48
/
setup.py
122 lines (110 loc) · 4.18 KB
/
setup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
import importlib.util
import os
import re
import subprocess
from setuptools import find_packages, setup
# Ensure we match the version set in optimum_benchmark/version.py
try:
    filepath = "optimum_benchmark/version.py"
    with open(filepath) as version_file:
        # Tuple-unpack so that zero or multiple matches raise immediately
        # instead of silently picking a wrong version string.
        (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
except Exception as error:
    # Raise instead of `assert False`: asserts are stripped under `python -O`,
    # which would let a missing or unparsable version file pass silently.
    raise RuntimeError(f"Error: Could not open '{filepath}' due to {error}\n") from error
# Minimum `optimum` version pinned by every optimum-backend extra in EXTRAS_REQUIRE below.
MIN_OPTIMUM_VERSION = "1.18.0"
# Core runtime dependencies installed unconditionally; GPU-specific packages
# are appended further down based on hardware detection / env-var overrides.
INSTALL_REQUIRES = [
    # HF dependencies
    "transformers",
    "accelerate",
    "datasets",
    # Hydra
    "hydra-core",
    "omegaconf",
    # CPU
    "psutil",
    # Reporting
    "typing-extensions",
    "flatten_dict",
    "colorlog",
    "pandas",
    "rich",
]
# Probe for the vendor GPU management CLIs to auto-detect the host accelerator.
# Output is discarded so `pip install` logs are not polluted with GPU status
# tables; any failure (binary missing, non-zero exit) means "not this vendor".
try:
    subprocess.run(["nvidia-smi"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    IS_NVIDIA_SYSTEM = True
except Exception:
    IS_NVIDIA_SYSTEM = False
try:
    subprocess.run(["rocm-smi"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    IS_ROCM_SYSTEM = True
except Exception:
    IS_ROCM_SYSTEM = False
# Hardware auto-detection can be overridden by setting USE_CUDA=1 / USE_ROCM=1.
USE_CUDA = (os.environ.get("USE_CUDA") == "1") or IS_NVIDIA_SYSTEM
USE_ROCM = (os.environ.get("USE_ROCM") == "1") or IS_ROCM_SYSTEM

if USE_CUDA:
    # NVML bindings, used for NVIDIA-specific tracking.
    INSTALL_REQUIRES.append("nvidia-ml-py")

if USE_ROCM:
    # ROCm SMI bindings are not published on PyPI, so install straight from git.
    INSTALL_REQUIRES.append("pyrsmi@git+https://github.com/ROCm/pyrsmi.git")
    # amdsmi cannot be pip-installed either; warn (don't fail) when it's absent.
    if importlib.util.find_spec("amdsmi") is None:
        print(
            "ROCm GPU detected without amdsmi installed. You won't be able to run process-specific VRAM tracking. "
            "Please install amdsmi from https://github.com/ROCm/amdsmi to enable this feature."
        )
# Optional dependency groups, installable as e.g. `pip install optimum-benchmark[openvino]`.
EXTRAS_REQUIRE = {
    # Dev tooling
    "quality": ["ruff"],
    "testing": ["pytest", "hydra-joblib-launcher"],
    # optimum backends
    "ipex": [f"optimum[ipex]>={MIN_OPTIMUM_VERSION}"],
    "openvino": [f"optimum[openvino,nncf]>={MIN_OPTIMUM_VERSION}"],
    "onnxruntime": [f"optimum[onnxruntime]>={MIN_OPTIMUM_VERSION}"],
    "onnxruntime-gpu": [f"optimum[onnxruntime-gpu]>={MIN_OPTIMUM_VERSION}"],
    "neural-compressor": [f"optimum[neural-compressor]>={MIN_OPTIMUM_VERSION}"],
    "torch-ort": ["torch-ort", "onnxruntime-training", f"optimum>={MIN_OPTIMUM_VERSION}"],
    # other backends
    "llama-cpp": ["llama-cpp-python"],
    "llm-swarm": ["llm-swarm"],
    "py-txi": ["py-txi"],
    "vllm": ["vllm"],
    # optional dependencies
    "autoawq": ["autoawq"],
    "auto-gptq": ["optimum", "auto-gptq"],
    "sentence-transformers": ["sentence-transformers"],
    "bitsandbytes": ["bitsandbytes"],
    "codecarbon": ["codecarbon"],
    "flash-attn": ["flash-attn"],
    "deepspeed": ["deepspeed"],
    "diffusers": ["diffusers"],
    "timm": ["timm"],
    "peft": ["peft"],
}
# Read the long description with a context manager so the file handle is
# closed deterministically (the previous `open(...).read()` inside the setup()
# call relied on garbage collection to release it).
with open("README.md", "r", encoding="utf-8") as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
    # `optimum-benchmark` CLI entry point, dispatching to the Hydra-based main.
    entry_points={"console_scripts": ["optimum-benchmark=optimum_benchmark.cli:main"]},
    description="Optimum-Benchmark is a unified multi-backend utility for benchmarking "
    "Transformers, Timm, Diffusers and Sentence-Transformers with full support of "
    "Optimum's hardware optimizations & quantization schemes.",
    url="https://github.com/huggingface/optimum-benchmark",
    classifiers=[
        "Intended Audience :: Education",
        "Intended Audience :: Developers",
        "Operating System :: POSIX :: Linux",
        "Intended Audience :: Science/Research",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    keywords="benchmark, transformers, quantization, pruning, optimization, training, inference, onnx, onnx runtime, intel, "
    "habana, graphcore, neural compressor, ipex, ipu, hpu, llm-swarm, py-txi, vllm, llama-cpp, auto-gptq, autoawq, "
    "sentence-transformers, bitsandbytes, codecarbon, flash-attn, deepspeed, diffusers, timm, peft",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    author="HuggingFace Inc. Special Ops Team",
    include_package_data=True,
    name="optimum-benchmark",
    # __version__ is extracted from optimum_benchmark/version.py near the top of this file.
    version=__version__,
    license="Apache",
)