Allow choosing between OpenSearch and Vespa
medihack committed May 26, 2024
1 parent 883b8d4 commit c5a24ef
Showing 9 changed files with 235 additions and 107 deletions.
37 changes: 4 additions & 33 deletions compose/docker-compose.base.yml
@@ -3,16 +3,17 @@ x-app: &default-app
- /mnt:/mnt
depends_on:
- postgres
- vespa
environment:
USE_DOCKER: 1
DJANGO_STATIC_ROOT: "/var/www/radis/static/"
DATABASE_URL: "psql://postgres:postgres@postgres.local:5432/postgres"
OPENSEARCH_ENABLED: "${OPENSEARCH_ENABLED-true}"
OPENSEARCH_HOST: "opensearch-node1.local"
OPENSEARCH_PORT: "9200"
VESPA_ENABLED: "${VESPA_ENABLED-false}"
VESPA_HOST: "vespa.local"
VESPA_CONFIG_PORT: "19071"
VESPA_DATA_PORT: "8080"
OPENSEARCH_HOST: "opensearch-node1.local"
OPENSEARCH_PORT: "9200"
RABBITMQ_URL: "amqp://rabbit"
RABBIT_MANAGEMENT_HOST: "rabbit"
RABBIT_MANAGEMENT_PORT: "15672"
@@ -57,39 +58,12 @@ services:
celery --broker=amqp://rabbit/ flower --url_prefix=flower
"
llamacpp:
image: ghcr.io/ggerganov/llama.cpp:server
hostname: llamacpp.local

postgres:
image: postgres:16.2
hostname: postgres.local
volumes:
- postgres_data:/var/lib/postgresql/data

# https://opensearch.org/docs/latest/install-and-configure/install-opensearch/docker/#sample-docker-composeyml
opensearch-node1:
image: opensearchproject/opensearch:latest
hostname: opensearch-node1.local
volumes:
- opensearch_data:/usr/share/opensearch/data

opensearch-dashboards:
image: opensearchproject/opensearch-dashboards:latest
hostname: opensearch-dashboards.local

vespa:
image: vespaengine/vespa:8
hostname: vespa.local
healthcheck:
test: curl http://localhost:19071/state/v1/health
timeout: 10s
retries: 3
start_period: 40s
volumes:
- vespa_data:/opt/vespa/var
- vespa_logs:/opt/vespa/logs

# RabbitMQ authentication can't be disabled. So when we try to log into
# the management console we have to use "guest" as username and password.
# The real authentication happens by ADIT itself, because the management
@@ -116,8 +90,5 @@ volumes:
radis_data:
flower_data:
postgres_data:
opensearch_data:
vespa_data:
vespa_logs:
rabbit_data:
redis_data:
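
Since both toggles are interpolated with defaults (OPENSEARCH_ENABLED falls back to "true", VESPA_ENABLED to "false" via the ${VAR-default} syntax), OpenSearch remains the default search backend and Vespa is opt-in. A minimal sketch of switching backends from the host shell, assuming the stack is started directly from these compose files (the project may wrap this in its own task runner, and a .env file in the Compose project directory would work as well):

# Run with Vespa instead of OpenSearch for this invocation only;
# Docker Compose interpolates the values before creating the containers.
OPENSEARCH_ENABLED=false VESPA_ENABLED=true \
  docker compose -f compose/docker-compose.base.yml -f compose/docker-compose.dev.yml up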
92 changes: 54 additions & 38 deletions compose/docker-compose.dev.yml
@@ -27,105 +27,121 @@ services:
wait-for-it -s postgres.local:5432 -t 60 &&
./manage.py migrate &&
./manage.py collectstatic --no-input &&
wait-for-it -s opensearch-node1.local:9200 -t 60 &&
./manage.py opensearch --mappings dev &&
wait-for-it -s vespa.local:19071 -t 60 &&
./manage.py vespa --generate --deploy &&
if [[ $$OPENSEARCH_ENABLED == true ]]; then
wait-for-it -s opensearch-node1.local:9200 -t 60 &&
./manage.py opensearch --mappings dev
fi
if [[ $$VESPA_ENABLED == true ]]; then
wait-for-it -s vespa.local:19071 -t 60 &&
./manage.py vespa --generate --deploy
fi
./manage.py populate_users_and_groups --users 20 --groups 3 &&
./manage.py populate_reports --report-language de &&
./manage.py runserver 0.0.0.0:8000
"
profiles:
- full
- web
worker_default:
<<: *default-app
command: |
./manage.py celery_worker -c 1 -Q default_queue --autoreload
profiles:
- full
worker_vespa:
<<: *default-app
command: |
./manage.py celery_worker -c 1 -Q vespa_queue --autoreload
profiles:
- full
profiles: ["vespa"]

worker_llm:
<<: *default-app
command: |
./manage.py celery_worker -c 1 -Q llm_queue --autoreload
profiles:
- full
celery_beat:
<<: *default-app
command: |
./manage.py celery_beat --autoreload
profiles:
- full
flower:
<<: *default-app
profiles:
- full

llamacpp:
llamacpp-cpu:
image: ghcr.io/ggerganov/llama.cpp:server
hostname: llamacpp.local
ports:
- 9610:8080
volumes:
- ../models:/models
command: "-m /models/model.gguf -c 512 --host 0.0.0.0 --port 8080"
profiles:
- full
profiles: ["cpu"]

llamacpp-gpu:
image: ghcr.io/ggerganov/llama.cpp:server-cuda
hostname: llamacpp.local
ports:
- 9610:8080
volumes:
- ../models:/models
command: "-m /models/model.gguf -c 512 --host 0.0.0.0 --port 8080"
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
profiles: ["gpu"]

postgres:
environment:
POSTGRES_PASSWORD: "postgres"
profiles:
- full
- web

# https://opensearch.org/docs/latest/install-and-configure/install-opensearch/docker/#sample-docker-composeyml
opensearch-node1:
image: opensearchproject/opensearch:latest
hostname: opensearch-node1.local
volumes:
- opensearch_data1:/usr/share/opensearch/data
environment:
OPENSEARCH_JAVA_OPTS: "-Xms2048m -Xmx2048m"
OPENSEARCH_JAVA_OPTS: "-Xms512m -Xmx512m"
DISABLE_SECURITY_PLUGIN: "true"
discovery.type: single-node # https://github.com/gitpod-io/gitpod/issues/8399
ports:
- 9200:9200
profiles:
- full
- web
profiles: ["opensearch"]

opensearch-dashboards:
image: opensearchproject/opensearch-dashboards:latest
hostname: opensearch-dashboards.local
ports:
- 5601:5601
environment:
DISABLE_SECURITY_DASHBOARDS_PLUGIN: "true"
OPENSEARCH_HOSTS: '["http://opensearch-node1.local:9200"]'
profiles: ["opensearch"]

vespa:
image: vespaengine/vespa:8
hostname: vespa.local
healthcheck:
test: curl http://localhost:19071/state/v1/health
timeout: 10s
retries: 3
start_period: 40s
volumes:
- vespa_data:/opt/vespa/var
- vespa_logs:/opt/vespa/logs
environment:
# Reduce memory usage of Vespa during development (especially on Cloud IDEs), see
# https://docs.vespa.ai/en/operations/node-setup.html#memory-settings
VESPA_CONFIGSERVER_JVMARGS: "-Xms32M -Xmx128M"
VESPA_CONFIGPROXY_JVMARGS: "-Xms32M -Xmx32M"
ports:
- 9620:8080
profiles:
- full
- web

rabbit:
profiles:
- full

redis:
profiles:
- full
profiles: ["vespa"]

volumes:
vscode-server:
vscode-server-insiders:
opensearch_data1:
vespa_data:
vespa_logs:
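
Taken together, the dev services are now grouped by Compose profiles ("web", "full", "opensearch", "vespa", "cpu", "gpu") rather than everything living under "full", so the search engine and the llama.cpp flavour are chosen explicitly. A possible invocation, under the assumption that the stack is launched straight from these compose files and that the listed profiles cover the services needed for local development:

# Dev stack with OpenSearch as the search backend and CPU inference:
COMPOSE_PROFILES=web,opensearch,cpu \
  docker compose -f compose/docker-compose.base.yml -f compose/docker-compose.dev.yml up

# Same stack with Vespa instead; the web container then waits for vespa.local
# and runs "./manage.py vespa --generate --deploy" because VESPA_ENABLED is true:
OPENSEARCH_ENABLED=false VESPA_ENABLED=true COMPOSE_PROFILES=web,vespa,cpu \
  docker compose -f compose/docker-compose.base.yml -f compose/docker-compose.dev.yml up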
