-
Notifications
You must be signed in to change notification settings - Fork 12
/
vllm_job.yaml
76 lines (76 loc) · 1.6 KB
/
vllm_job.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
---
# Job: runs the vLLM OpenAI-compatible API server serving facebook/opt-125m.
apiVersion: batch/v1
kind: Job
metadata:
  name: vllm-job
  labels:
    app: vllm
spec:
  completions: 1
  parallelism: 1
  template:
    metadata:
      labels:
        app: vllm
    spec:
      containers:
        - name: vllm-container
          # NOTE(review): image has no tag — pin a tag or digest for reproducibility.
          image: quay.io/chenw615/vllm_dra
          command:
            - "python"
            - "-m"
            - "vllm.entrypoints.openai.api_server"
            - "--model"
            - "facebook/opt-125m"
          ports:
            - containerPort: 8000
          env:
            # Token is sourced from a pre-existing Secret; the Secret itself
            # must be created out of band (not defined in this file).
            - name: HUGGING_FACE_HUB_TOKEN
              valueFrom:
                secretKeyRef:
                  name: huggingface-secret
                  key: HF_TOKEN
            - name: MODEL_NAME
              value: "facebook/opt-125m"
            # Cache dirs all point at the mounted PVC so model weights persist
            # across pod restarts.
            - name: TRANSFORMERS_CACHE
              value: "/workspace/huggingface/"
            - name: HF_HOME
              value: "/workspace/huggingface/"
            - name: NUMBA_DISABLE_JIT
              value: "1"
            - name: NUMBA_CACHE_DIR
              value: "/workspace/huggingface/"
          resources:
            limits:
              # Requests one 3g.20gb MIG slice of an NVIDIA GPU.
              nvidia.com/mig-3g.20gb: 1
          volumeMounts:
            - name: workspace-storage
              mountPath: /workspace/huggingface/
      restartPolicy: OnFailure
      volumes:
        - name: workspace-storage
          persistentVolumeClaim:
            claimName: workspace-storage
---
# Service: cluster-internal endpoint fronting the vLLM pod on port 8000.
apiVersion: v1
kind: Service
metadata:
  name: vllm
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 8000
      targetPort: 8000
  selector:
    # Must match the pod-template label on the vllm Job.
    app: vllm
---
# PVC: persistent storage for the Hugging Face model/cache directory.
# NOTE(review): no storageClassName set — relies on the cluster default
# StorageClass; confirm one exists, or binding will hang in Pending.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: workspace-storage
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi