-
Notifications
You must be signed in to change notification settings - Fork 12
/
vllm_deployment.yaml
96 lines (95 loc) · 1.92 KB
/
vllm_deployment.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# Deployment: runs a single vLLM replica serving facebook/opt-125m via the
# OpenAI-compatible API server on port 8000 (requires a MIG GPU slice).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vllm
  labels:
    app: vllm
spec:
  replicas: 1
  selector:
    matchLabels:
      app: vllm
  template:
    metadata:
      labels:
        app: vllm
    spec:
      containers:
        - name: vllm-container
          # NOTE(review): image is untagged, so :latest is pulled implicitly —
          # pin a version tag or digest for reproducible rollouts.
          image: quay.io/chenw615/vllm_dra
          # Debug alternative: keep the pod alive without starting vLLM.
          # command: ["/bin/sh", "-c"]
          # args: ["sleep 99999"]
          command:
            - "python"
            - "-m"
            - "vllm.entrypoints.openai.api_server"
            - "--model"
            - "facebook/opt-125m"
          ports:
            - containerPort: 8000
          env:
            # Hugging Face token, read from the Secret defined in this file.
            - name: HUGGING_FACE_HUB_TOKEN
              valueFrom:
                secretKeyRef:
                  name: huggingface-secret
                  key: HF_TOKEN
            - name: MODEL_NAME
              value: "facebook/opt-125m"
            # TRANSFORMERS_CACHE and HF_HOME both point at the same path so
            # old and new transformers/huggingface_hub versions share a cache.
            - name: TRANSFORMERS_CACHE
              value: "/workspace/huggingface/"
            - name: HF_HOME
              value: "/workspace/huggingface/"
            # Numba JIT disabled; its cache dir is redirected to the same
            # writable path (presumably the image's default is read-only —
            # TODO confirm).
            - name: NUMBA_DISABLE_JIT
              value: "1"
            - name: NUMBA_CACHE_DIR
              value: "/workspace/huggingface/"
          volumeMounts:
            # Back the HF cache path with the PVC declared below so model
            # downloads survive pod restarts (the PVC was otherwise unused
            # by this Deployment).
            - name: huggingface-cache
              mountPath: /workspace/huggingface
          resources:
            limits:
              nvidia.com/mig-3g.20gb: 1
      volumes:
        - name: huggingface-cache
          persistentVolumeClaim:
            claimName: huggingface-cache-pvc
---
# Service: cluster-internal endpoint for the vLLM API server.
# Selects the Deployment's pods via the shared app=vllm label.
apiVersion: v1
kind: Service
metadata:
  name: vllm
spec:
  type: ClusterIP
  ports:
    - port: 8000
      targetPort: 8000
      name: http
  selector:
    app: vllm
---
# Secret: holds the Hugging Face Hub token consumed by the Deployment's
# HUGGING_FACE_HUB_TOKEN env var.
# NOTE(review): committing even placeholder secrets to VCS is risky —
# prefer creating this out-of-band (kubectl create secret / external
# secret store).
apiVersion: v1
kind: Secret
metadata:
  name: huggingface-secret
type: Opaque
data:
  # Replace with the base64-encoded token, e.g.:
  #   echo -n 'your_huggingface_secret_token' | base64
  # (values under `data:` MUST be base64; use `stringData:` for plain text)
  HF_TOKEN: <YOUR_TOKEN>
---
# PersistentVolume: 10Gi hostPath volume for the Hugging Face model cache.
# NOTE(review): the name ends in "-pvc" although this is a PV, which is
# confusing — kept as-is to avoid breaking anything that references it.
# NOTE(review): hostPath storage is node-local; presumably this targets a
# single-node cluster — confirm before using with multiple nodes.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: huggingface-cache-pvc
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  # Matches the PVC below; `manual` opts out of dynamic provisioning.
  storageClassName: manual
  hostPath:
    path: /data/huggingface-cache
---
# PersistentVolumeClaim: binds to the `manual` hostPath PV above
# (matched by storageClassName, access mode, and capacity).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: huggingface-cache-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: manual