relocate nvidia-modelopt dependency to srt
Edwardf0t1 committed Dec 31, 2024
1 parent 8df476b commit e847fac
Showing 1 changed file with 2 additions and 2 deletions.
python/pyproject.toml: 2 additions & 2 deletions
@@ -17,13 +17,13 @@ dependencies = ["requests", "tqdm", "numpy", "IPython", "setproctitle"]
 
 [project.optional-dependencies]
 runtime_common = ["aiohttp", "decord", "fastapi",
-"hf_transfer", "huggingface_hub", "interegular", "nvidia-modelopt", "modelscope",
+"hf_transfer", "huggingface_hub", "interegular", "modelscope",
 "orjson", "outlines>=0.0.44,<0.1.0",
 "packaging", "pillow", "prometheus-client>=0.20.0",
 "psutil", "pydantic", "python-multipart",
 "pyzmq>=25.1.2", "torchao>=0.7.0", "uvicorn", "uvloop",
 "xgrammar>=0.1.6"]
-srt = ["sglang[runtime_common]", "torch", "vllm>=0.6.3.post1,<=0.6.4.post1", "cuda-python", "flashinfer==0.1.6", "sgl-kernel>=0.0.2.post10"]
+srt = ["sglang[runtime_common]", "torch", "vllm>=0.6.3.post1,<=0.6.4.post1", "cuda-python", "flashinfer==0.1.6", "sgl-kernel>=0.0.2.post10", "nvidia-modelopt"]
 
 # HIP (Heterogeneous-computing Interface for Portability) for AMD
 # => base docker rocm/vllm-dev:20241022, not from public vllm whl
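
Net effect for installers (a brief usage note; the extra names runtime_common and srt come from the file above, and the pip extras syntax is standard):

    pip install "sglang[runtime_common]"   # after this change, no longer pulls in nvidia-modelopt
    pip install "sglang[srt]"              # nvidia-modelopt is now installed via the srt extra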
