# docker-compose.gpu.yaml
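# GPU-enabled Compose stack: the kirin backend API, two llama.cpp servers
# (one serving chat completions, one serving embeddings), and the rebel
# container published on port 80. The "gpu" device reservations below only
# take effect on hosts with the NVIDIA Container Toolkit (or an equivalent
# GPU-enabled container runtime) installed.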
services:
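  # kirin: the backend API service, built from ./backend/Dockerfile and
  # published on host port 8000. Its configuration comes from the environment
  # variables below (typically supplied via a .env file). The ./backend/
  # source tree is bind-mounted into /app/, models and vector data live under
  # ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/, and the service waits for the
  # llamacpp and embedding_eng containers before starting.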
  kirin:
    container_name: kirin
    restart: always
    build:
      dockerfile: Dockerfile
      context: ./backend/
    environment:
      - ENVIRONMENT=${ENVIRONMENT}
      - DEBUG=${DEBUG}
      - BACKEND_SERVER_HOST=${BACKEND_SERVER_HOST}
      - BACKEND_SERVER_PORT=${BACKEND_SERVER_PORT}
      - BACKEND_SERVER_WORKERS=${BACKEND_SERVER_WORKERS}
      - BACKEND_SERVER_VERSION=${BACKEND_SERVER_VERSION}
      - IS_ALLOWED_CREDENTIALS=${IS_ALLOWED_CREDENTIALS}
      - API_TOKEN=${API_TOKEN}
      - AUTH_TOKEN=${AUTH_TOKEN}
      - JWT_SECRET_KEY=${JWT_SECRET_KEY}
      - JWT_SUBJECT=${JWT_SUBJECT}
      - JWT_TOKEN_PREFIX=${JWT_TOKEN_PREFIX}
      - JWT_ALGORITHM=${JWT_ALGORITHM}
      - JWT_MIN=${JWT_MIN}
      - JWT_HOUR=${JWT_HOUR}
      - JWT_DAY=${JWT_DAY}
      - HASHING_ALGORITHM_LAYER_1=${HASHING_ALGORITHM_LAYER_1}
      - HASHING_ALGORITHM_LAYER_2=${HASHING_ALGORITHM_LAYER_2}
      - HASHING_SALT=${HASHING_SALT}
      - INFERENCE_ENG=${INFERENCE_ENG}
      - INFERENCE_ENG_PORT=${INFERENCE_ENG_PORT}
      - INFERENCE_ENG_VERSION=${INFERENCE_ENG_VERSION}
      - EMBEDDING_ENG=${EMBEDDING_ENG}
      - EMBEDDING_ENG_PORT=${EMBEDDING_ENG_PORT}
      - LANGUAGE_MODEL_NAME=${LANGUAGE_MODEL_NAME}
      - ADMIN_USERNAME=${ADMIN_USERNAME}
      - ADMIN_EMAIL=${ADMIN_EMAIL}
      - ADMIN_PASS=${ADMIN_PASS}
      - TIMEZONE=${TIMEZONE}
      - INSTRUCTION=${INSTRUCTION}
      - NUM_CPU_CORES=${NUM_CPU_CORES}
      - NUM_CPU_CORES_EMBEDDING=${NUM_CPU_CORES_EMBEDDING}
      - EMBEDDING_MODEL_NAME=${EMBEDDING_MODEL_NAME}
      - METRICS_PATHS=${METRICS_PATHS}
      - DEFAULT_RAG_DS_NAME=${DEFAULT_RAG_DS_NAME}
    volumes:
      - ./backend/:/app/
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/models:/models
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/vdata:/vdata
    expose:
      - 8000
    ports:
      - 8000:8000
    depends_on:
      # - db
      - llamacpp
      - embedding_eng
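  # llamacpp: llama.cpp inference server serving the chat model
  # models/${LANGUAGE_MODEL_NAME} (mounted from the shared models volume)
  # with an 8192-token context (-c 8192), published on host port 8080.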
  llamacpp:
    container_name: ${INFERENCE_ENG}
    image: gclub/llama.cpp:${INFERENCE_ENG_VERSION}
    restart: always
    deploy: # https://github.com/compose-spec/compose-spec/blob/master/deploy.md
      resources:
        reservations:
          devices:
            - capabilities: ["gpu"]
    volumes:
      - "${DOCKER_VOLUME_DIRECTORY:-.}/volumes/models:/models"
    expose:
      - 8080
    ports:
      - 8080:8080
    command: ["-m", "models/${LANGUAGE_MODEL_NAME}", "-c", "8192"]
  embedding_eng:
    container_name: ${EMBEDDING_ENG}
    image: gclub/llama.cpp:${INFERENCE_ENG_VERSION}
    restart: always
    deploy: # https://github.com/compose-spec/compose-spec/blob/master/deploy.md
      resources:
        reservations:
          devices:
            - capabilities: ["gpu"]
    volumes:
      - "${DOCKER_VOLUME_DIRECTORY:-.}/volumes/models:/models"
    expose:
      - 8080
    ports:
      - 8082:8080
    command: ["-m", "models/${EMBEDDING_MODEL_NAME}", "--embeddings", "--pooling", "mean", "-c", "512"]
  rebel:
    container_name: rebel
    image: ghcr.io/skywardai/rebel:v0.1.9
    restart: always
    expose:
      - 80
    ports:
      - 80:80
    depends_on:
      - kirin
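# Example usage (a sketch, assuming the variables referenced above, such as
# INFERENCE_ENG, INFERENCE_ENG_VERSION, LANGUAGE_MODEL_NAME, and
# EMBEDDING_MODEL_NAME, are defined in a .env file next to this compose file,
# and that the GGUF model files already exist under
# ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/models):
#
#   docker compose -f docker-compose.gpu.yaml up --build -d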