diff --git a/README.md b/README.md index ad14ed8..001f157 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ helm repo update curl https://raw.githubusercontent.com/deephealthproject/docker-backend/develop/k8s/deephealth-backend/values.yaml -o values.yaml ``` -3. Edit the `values.yaml` to configure your deployment ([here](#helm-chart-parameters) the available parameters) +3. Edit the `values.yaml` to configure your deployment ([here](#helm-chart-parameters) the available parameters) 4. To install the chart with the release name `deephealth-backend` : ```bash @@ -62,7 +62,7 @@ helm repo add deephealth https://deephealthproject.github.io/helm-charts/ helm dependency build k8s/deephealth-backend ``` -3. Edit the ´k8s/values.yaml´ template to configure your deployment ([here](#helm-chart-parameters) the available parameters) +3. Edit the `k8s/values.yaml` template to configure your deployment ([here](#helm-chart-parameters) the available parameters) 4. Deploy on Kubernetes through `helm`: @@ -94,7 +94,7 @@ ingress: ### Parameters -The following tables lists the main configurable parameters of the `deephealth-backend` chart and their default values. +The following table lists the main configurable parameters of the `deephealth-backend` chart and their default values. 
| Parameter | Description | Default | |-----------------------------------------------|-----------------------------------------------------------------------------------------------|-----------------------------------------------| @@ -108,10 +108,14 @@ The following tables lists the main configurable parameters of the `deephealth-b | `ingress.annations` | Annotations for the ingress realated with the API Endpoint service | `kubernetes.io/ingress.class: nginx` | | `ingress.hosts` | Hosts paths for the ingress realated with the API Endpoint service (see example) | `nil` | | `backend.image.repository` | Back-end App Docker Image | `dhealth/backend` | -| `backend.image.tag` | Back-end App Docker Image Tag | `2fd828d8` | +| `backend.image.tag` | Back-end App Docker Image Tag | `2fd828d8` | | `backend.admin.username` | Username of the administrator of the backend app | `admin` | | `backend.admin.password` | Password of the administrator of the backend app (autogenerated if not defined) | `nil` | -| `backend.admin.email` | Email of the administrator of the backend app | `nil` | +| `backend.admin.email` | Email of the administrator of the backend app | `nil` | +| `backend.social_auth.github.client_id` | ClientID of the back-end registered as OAuth2 app on Github | `nil` | +| `backend.social_auth.github.client_secret` | Client Secret of the back-end registered as OAuth2 app on Github | `nil` | +| `backend.email.user` | Username for the back-end email service | `nil` | +| `backend.email.password` | Password for the back-end email service | `nil` | | `backend.replicaCount` | Number of replicase of the the backend (Gunicorn) server replicas | `1` | | `backend.workers` | Number of workers of every backend (Gunicorn) server replicas | `3` | | `backend.persistence.data.storageClass` | Storage class used for backend data (requires support for `ReadWriteMany` access mode) | `*globalStorageClass` | diff --git a/config.template 
b/config.template index 2d47b68..2f2b9a6 100644 --- a/config.template +++ b/config.template @@ -1,6 +1,9 @@ # Allow DEBUG mode. Don't run with debug turned on in production! DEBUG=${DEBUG} +# Enable eddl CS_GPU computing service +EDDL_WITH_CUDA=${EDDL_WITH_CUDA} + # Set SECRET_KEY. Keep the secret key used in production secret! SECRET_KEY="${SECRET_KEY}" @@ -15,12 +18,13 @@ CORS_ORIGIN_WHITELIST="${CORS_ORIGIN_WHITELIST}" # DATABASE_URL=psql://urser:un-githubbedpassword@127.0.0.1:8458/database # PostgreSQL DB # Relative path for static resources -STATIC_URL="${STATIC_URL}" +STATIC_URL='/backend/static/' + +# Base url to serve media files +MEDIA_URL='/backend/media/' -# Data directories -TRAINING_DIR="${TRAINING_DIR}" -INFERENCE_DIR="${INFERENCE_DIR}" -DATASETS_DIR="${DATASETS_DIR}" +# Data directory +DATA_DIR="${DATA_DIR}" # RabbitMQ Credentials # see https://hub.docker.com/_/rabbitmq @@ -31,3 +35,10 @@ CELERY_ACCEPT_CONTENT="${CELERY_ACCEPT_CONTENT}" # list of comma separated value CELERY_RESULT_BACKEND="${CELERY_RESULT_BACKEND}" CELERY_TASK_SERIALIZER="${CELERY_TASK_SERIALIZER}" +# OAuth settings +DRFSO2_PROPRIETARY_BACKEND_NAME='GithubOAuth2' +DRFSO2_URL_NAMESPACE='social' + +# GitHub configuration +SOCIAL_AUTH_GITHUB_KEY='' +SOCIAL_AUTH_GITHUB_SECRET='' diff --git a/docker-compose.template.yml b/docker-compose.template.yml index 0882777..cd8ddd7 100644 --- a/docker-compose.template.yml +++ b/docker-compose.template.yml @@ -12,7 +12,7 @@ services: restart: "no" user: "${UID}:${GID}" runtime: ${DOCKER_RUNTIME} - command: ["/bin/bash","-c","init.sh && start.sh backend"] + command: ["/bin/bash","-c","wait-for-postgres.sh && init.sh && start.sh backend"] env_file: &envfile - settings.conf environment: @@ -21,14 +21,11 @@ services: - DATABASE_URL=psql://${POSTGRESQL_USERNAME}:${POSTGRESQL_PASSWORD}@db:5432/${POSTGRESQL_DATABASE} # PostgreSQL DB - POSTGRESQL_HOST=db ports: - - "${BACKEND_PORT}:8000" + - "${BACKEND_PORT}:${BACKEND_PORT}" volumes: - ${BACKEND_VOLUME} - 
./.config:/app/config - ./settings.conf:/app/settings.conf - - datasets:${DATASETS_DIR} - - training:${TRAINING_DIR} - - inference:${INFERENCE_DIR} + - data:${DATA_DIR} depends_on: - broker - db @@ -48,12 +45,9 @@ services: - DATABASE_URL=psql://${POSTGRESQL_USERNAME}:${POSTGRESQL_PASSWORD}@db:5432/${POSTGRESQL_DATABASE} # PostgreSQL DB command: ["start.sh","celery"] volumes: - ${BACKEND_VOLUME} - ./.config:/app/config - ./settings.conf:/app/settings.conf - - datasets:${DATASETS_DIR} - - training:${TRAINING_DIR} - - inference:${INFERENCE_DIR} + - data:${DATA_DIR} depends_on: - broker - db @@ -65,7 +59,6 @@ services: environment: - PYTHONPATH="$PYTHONPATH:.%" volumes: - ${BACKEND_VOLUME} - ./.config:/app/config - ./settings.conf:/app/settings.conf - postgresql:/bitnami/postgresql @@ -89,6 +82,4 @@ services: volumes: postgresql: - datasets: - inference: - training: \ No newline at end of file + data: diff --git a/docker/.dockerignore b/docker/.dockerignore index 8184442..469da21 100644 --- a/docker/.dockerignore +++ b/docker/.dockerignore @@ -4,4 +4,5 @@ **/.vscode **/.DS_Store **/.env +**/*.pyc **/venv diff --git a/docker/Dockerfile b/docker/Dockerfile index b26f691..d6c10c7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -9,18 +9,18 @@ ARG USERNAME=backend # Set defaults ARG BACKEND_PORT=8000 -ENV BACKEND_PORT ${BACKEND_PORT} +ENV BACKEND_PORT ${BACKEND_PORT} # Set the path where the app will be installed ENV APP_PATH "/app" # Set data paths -ARG DATASETS_DIR="/data/datasets" -ENV DATASETS_DIR=${DATASETS_DIR} -ARG TRAINING_DIR="/data/training" -ARG TRAINING_DIR=${TRAINING_DIR} -ARG INFERENCE_DIR="/data/inference" -ARG INFERENCE_DIR=${INFERENCE_DIR} +ARG DATA_DIR="/data" +ENV DATA_DIR=${DATA_DIR} + +# Locale settings +ENV LC_ALL C.UTF-8 +ENV LANG C.UTF-8 # Copy source path COPY src ${APP_PATH} @@ -48,20 +48,16 @@ RUN \ /usr/local/bin/wait-for-postgres.sh \ # install app requirements && pip --no-cache-dir install -r requirements.txt \ + && pip 
--no-cache-dir install click==7.1.2 \ # create the user && groupadd ${USERNAME} -g ${GID} || true \ && useradd -m -u ${UID} -g ${GID} ${USERNAME} \ && chown -R ${USERNAME} ${APP_PATH} \ - && mkdir -p ${DATASETS_DIR} ${INFERENCE_DIR} ${TRAINING_DIR} \ - && chown -R ${USERNAME} ${DATASETS_DIR} ${TRAINING_DIR} ${INFERENCE_DIR} - -# Declare data volumes -#VOLUME ["${DATASETS_DIR}"] -#VOLUME ["${INFERENCE_DIR}"] -#VOLUME ["${TRAINING_DIR}"] + && mkdir -p ${DATA_DIR} \ + && chown -R ${USERNAME} ${DATA_DIR} # Set the default user USER ${USERNAME} # Default command -CMD start.sh backend \ No newline at end of file +CMD start.sh backend diff --git a/docker/init.sh b/docker/init.sh index 76f7b7e..f9f08cd 100644 --- a/docker/init.sh +++ b/docker/init.sh @@ -3,12 +3,26 @@ ## Notice that we assume /app as current working directory # Apply all available migrations -python3 manage.py migrate +python manage.py migrate -# Create the user if doesn't exist +# Creating an admin user (non-interactive) +# (~equivalent to the interactive cmd `python manage.py createsuperuser`) cat <&2 echo "PostgreSQL is unavailable -- sleep 2 seconds and retry" ; sleep 2 ; done ; @@ -83,4 +83,4 @@ spec: tolerations: {{- toYaml . 
| nindent 8 }} {{- end }} - backoffLimit: 4 \ No newline at end of file + backoffLimit: 4 diff --git a/k8s/deephealth-backend/templates/secrets.yaml b/k8s/deephealth-backend/templates/secrets.yaml index 4668937..89f1e93 100644 --- a/k8s/deephealth-backend/templates/secrets.yaml +++ b/k8s/deephealth-backend/templates/secrets.yaml @@ -36,7 +36,7 @@ stringData: # Cross-Origin Resource Sharing (CORS) whitelist CORS_ORIGIN_WHITELIST="{{- range .Values.ingress.hosts }},http://{{ .host }},https://{{ .host }}{{- end }}{{- if .Values.backend.corsOriginWhiteList }},{{ .Values.backend.corsOriginWhiteList }}{{- end }}" - + # Database connection settings # DATABASE_URL=sqlite:///my-local-sqlite.db # SQLite DB # DATABASE_URL=psql://{{ .Values.postgresql.postgresqlUsername }}:{{ .Values.postgresql.postgresqlPassword }}@{{ include "deephealth-backend.fullname" . }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }} @@ -49,7 +49,7 @@ stringData: # Data directories DATA_DIR={{ .Values.backend.persistence.data.path }} - + # RabbitMQ Credentials # see https://hub.docker.com/_/rabbitmq # RABBITMQ_BROKER_URL=amqp://{{ .Values.broker.rabbitmq.username }}:{{ .Values.broker.rabbitmq.password }}@{{ include "deephealth-backend.fullname" . 
}}-rabbitmq:{{ .Values.broker.service.port }} @@ -58,6 +58,18 @@ stringData: CELERY_ACCEPT_CONTENT={{ .Values.celery.acceptContent | quote }} # CELERY_RESULT_BACKEND=db+sqlite:///results.sqlite CELERY_TASK_SERIALIZER={{ .Values.celery.taskSerializer | quote }} + + # OAuth settings + DRFSO2_PROPRIETARY_BACKEND_NAME='GithubOAuth2' + DRFSO2_URL_NAMESPACE='social' + + # GitHub configuration + SOCIAL_AUTH_GITHUB_KEY={{ .Values.backend.social_auth.github.client_id | quote }} + SOCIAL_AUTH_GITHUB_SECRET={{ .Values.backend.social_auth.github.client_secret | quote }} + + # Email backend settings + EMAIL_HOST_USER={{ .Values.backend.email.user | quote }} + EMAIL_HOST_PASSWORD={{ .Values.backend.email.password | quote }} --- {{- end }} {{- if or .Release.IsInstall (and .Release.IsUpgrade (or .Values.postgresql.postgresqlPassword .Values.postgresql.postgresqlPostgresPassword)) }} @@ -75,7 +87,7 @@ metadata: helm.sh/resource-policy: keep type: Opaque data: - postgresql-postgres-password: {{ include "deephealth-backend.postgresql.postgres-password" . | b64enc | quote }} + postgresql-postgres-password: {{ include "deephealth-backend.postgresql.postgres-password" . | b64enc | quote }} postgresql-password: {{ include "deephealth-backend.postgresql.password" . | b64enc | quote }} {{- if .Values.postgresql.replication.enabled }} postgresql-replication-password: {{ include "deephealth-backend.postgresql.replication.password" . 
| b64enc | quote }} @@ -110,4 +122,4 @@ data: {{ else }} rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} {{ end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/k8s/deephealth-backend/values.yaml b/k8s/deephealth-backend/values.yaml index 72b5f05..5d82900 100644 --- a/k8s/deephealth-backend/values.yaml +++ b/k8s/deephealth-backend/values.yaml @@ -56,13 +56,13 @@ backend: # number of replicas replicaCount: 1 - + # number of Gunicorn workers workers: 3 image: &backend_image repository: dhealth/backend - tag: 2fd828d8 + tag: 11f2b98 pullPolicy: *imagePullPolicy service: @@ -82,8 +82,19 @@ backend: staticFiles: storageClass: *globalStorageClass size: 10Mi - + # Secrets to access social authentication providers + social_auth: + github: + client_id: + client_secret: + + # Email settings + email: + user: + password: + + # Configure pod resources resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little @@ -95,9 +106,9 @@ backend: # requests: # cpu: 100m # memory: 128Mi - # To use GPU you need to install - # https://github.com/NVIDIA/k8s-device-plugin) - # on your k8s Cluster + # To use GPU you need to install + # https://github.com/NVIDIA/k8s-device-plugin) + # on your k8s Cluster # limits: # nvidia.com/gpu: 2 # requesting 2 GPUs @@ -113,11 +124,11 @@ nginx: nameOverride: "nginx" image: - # repository: + # repository: # tag: latest pullPolicy: *imagePullPolicy debug: *debug - + service: type: ClusterIP port: 80 @@ -137,7 +148,7 @@ nginx: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # hosts: - # - host: + # - host: # name: pippo.backend.172.30.10.101.nip.io # path: / @@ -164,7 +175,7 @@ celery: acceptContent: 'json' # list of comma separated values taskSerializer: 'json' - + service: # type: ClusterIP # port: 80 @@ -194,9 +205,9 @@ celery: # requests: # cpu: 100m # memory: 128Mi - # To use GPU you need to install - # https://github.com/NVIDIA/k8s-device-plugin) - # on your k8s Cluster + # To use GPU you need to install + # https://github.com/NVIDIA/k8s-device-plugin) + # on your k8s Cluster # limits: # nvidia.com/gpu: 2 # requesting 2 GPUs @@ -207,7 +218,7 @@ celery: affinity: {} -# RabbitMQ settings. You can find all the available settings +# RabbitMQ settings. You can find all the available settings # at https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq broker: &broker nameOverride: "rabbitmq" @@ -215,11 +226,11 @@ broker: &broker image: debug: *debug pullPolicy: *imagePullPolicy - + rabbitmq: username: user # password: password - + service: port: 5672 @@ -231,7 +242,7 @@ broker: &broker rabbitmq: existingPasswordSecret: deephealth-backend-rabbitmq-secrets existingErlangSecret: deephealth-backend-rabbitmq-secrets - + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little @@ -308,7 +319,7 @@ console: debug: *debug replicaCount: 1 - + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -320,9 +331,9 @@ console: # requests: # cpu: 100m # memory: 128Mi - # To use GPU you need to install - # https://github.com/NVIDIA/k8s-device-plugin) - # on your k8s Cluster + # To use GPU you need to install + # https://github.com/NVIDIA/k8s-device-plugin) + # on your k8s Cluster # limits: # nvidia.com/gpu: 2 # requesting 2 GPUs diff --git a/k8s/values.yaml b/k8s/values.yaml index c62adc6..f435714 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -14,13 +14,11 @@ global: # preserve PVC when a release is deleted retainPVCs: True - # ServiceType of the Back-end EndPoint # endpoint: # service: # type: NodePort - # Set external access to the services # ingress: # enabled: true @@ -28,14 +26,14 @@ global: # kubernetes.io/ingress.class: nginx # # kubernetes.io/tls-acme: "true" # hosts: -# - host: backend.172.30.10.101.nip.io +# - host: backend.172.30.10.100.nip.io # serviceName: endpoint # service name without ReleasePrefix # servicePort: 80 # tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local +# - secretName: chart-example-tls +# hosts: +# - chart-example.local # Backend settings backend: @@ -47,8 +45,9 @@ backend: replicaCount: 1 image: &backend_image - # repository: dhealth/backend - tag: 2fd828d8 + repository: dhealth/backend + tag: 11f2b98 + pullPolicy: *imagePullPolicy # service: # type: NodePort @@ -61,7 +60,7 @@ backend: persistence: data: storageClass: *globalStorageClass - path: '/data' + path: "/data" size: 10Gi # existingClaim: datasets staticFiles: @@ -74,9 +73,21 @@ backend: allowedHosts: "*" # Cross-Origin Resource Sharing (CORS) whitelist - corsOriginWhiteList: "" - resources: {} + # Secrets to access 
social authentication providers + social_auth: + github: + client_id: + client_secret: + + # Email settings + email: + user: + password: + + # Configure pod resources + resources: + {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following @@ -87,9 +98,9 @@ backend: # requests: # cpu: 100m # memory: 128Mi - # To use GPU you need to install - # https://github.com/NVIDIA/k8s-device-plugin) - # on your k8s Cluster + # To use GPU you need to install + # https://github.com/NVIDIA/k8s-device-plugin) + # on your k8s Cluster # limits: # nvidia.com/gpu: 2 # requesting 2 GPUs @@ -99,14 +110,13 @@ backend: affinity: {} - # nginx settings nginx: nameOverride: "nginx" image: debug: *debug - + # service: # type: NodePort # port: 80 @@ -115,16 +125,15 @@ nginx: #serverBlockConfigMap: proxy-config # serverDataVolumeClaim: static-files # serverDataVolumePath: /app/static - ingress: # enabled: false # hostname: nginx.backend.172.30.10.101.nip.io # annotations: # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" + # kubernetes.io/tls-acme: "true" # hosts: - # - host: + # - host: # name: pippo.backend.172.30.10.101.nip.io # path: / @@ -134,7 +143,8 @@ nginx: # hosts: # - chart-example.local - resources: {} + resources: + {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following @@ -148,10 +158,9 @@ nginx: # Celery settings celery: + acceptContent: "json" # list of comma separated values + taskSerializer: "json" - acceptContent: 'json' # list of comma separated values - taskSerializer: 'json' - service: # type: ClusterIP # port: 80 @@ -170,7 +179,8 @@ celery: # # hosts: # # - chart-example.local - resources: {} + resources: + {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following @@ -181,9 +191,9 @@ celery: # requests: # cpu: 100m # memory: 128Mi - # To use GPU you need to install - # https://github.com/NVIDIA/k8s-device-plugin) - # on your k8s Cluster + # To use GPU you need to install + # https://github.com/NVIDIA/k8s-device-plugin) + # on your k8s Cluster # limits: # nvidia.com/gpu: 2 # requesting 2 GPUs @@ -193,19 +203,18 @@ celery: affinity: {} - -# RabbitMQ settings. You can find all the available settings +# RabbitMQ settings. You can find all the available settings # at https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq broker: &broker nameOverride: "rabbitmq" image: debug: *debug - + rabbitmq: username: user # password: password - + service: port: 5672 @@ -213,7 +222,7 @@ broker: &broker storageClass: *globalStorageClass size: 1Gi # existingClaim: "" - + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -235,7 +244,6 @@ broker: &broker # Copy broker properties as values of the subchart RabbitMQ rabbitmq: *broker - # PostgreSQL settings. 
# You can find all the available settings at https://github.com/bitnami/charts/tree/master/bitnami/postgresql postgresql: @@ -275,7 +283,6 @@ postgresql: affinity: {} - # Debug console settings console: enabled: *debug @@ -286,8 +293,9 @@ console: # debug: *debug replicaCount: 1 - - resources: {} + + resources: + {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following @@ -298,9 +306,9 @@ console: # requests: # cpu: 100m # memory: 128Mi - # To use GPU you need to install - # https://github.com/NVIDIA/k8s-device-plugin) - # on your k8s Cluster + # To use GPU you need to install + # https://github.com/NVIDIA/k8s-device-plugin) + # on your k8s Cluster # limits: # nvidia.com/gpu: 2 # requesting 2 GPUs diff --git a/settings.conf b/settings.conf index 6aa349a..4640dcc 100644 --- a/settings.conf +++ b/settings.conf @@ -22,23 +22,17 @@ BACKEND_PORT=8000 FLOWER_PORT=5555 # Base iamge for building the Back-end image -DOCKER_LIBS_IMAGE=dhealth/pylibs:develop_build77 +DOCKER_LIBS_IMAGE=dhealth/pylibs-toolkit:latest # Docker image name DOCKER_BACKEND_IMAGE=dhealth/backend -# Set Docker Runtime: runc | nvidia -# The current implementation only support 'nvidia' runtime -# Notice that to use GPUs you need to use the 'nvidia' -# DOCKER_RUNTIME *and* use a GPU-enabled pylibs container image. 
-DOCKER_RUNTIME=runc #"nvidia" +# Set to 'True' to enable eddl CS_GPU computing service (default: False) +EDDL_WITH_CUDA=False # Set path to your local copy of the Back-end #BACKEND_LOCAL_PATH="" -# Django static -STATIC_URL=/backend/static/ - # Comma separated allowed hosts ALLOWED_HOSTS=localhost,127.0.0.1 @@ -46,12 +40,17 @@ ALLOWED_HOSTS=localhost,127.0.0.1 CORS_ORIGIN_WHITELIST=http://localhost:4200 # Data directories -TRAINING_DIR=/data/training -INFERENCE_DIR=/data/inference -DATASETS_DIR=/data/datasets +DATA_DIR=/data # Celery settings CELERY_ACCEPT_CONTENT=json CELERY_RESULT_BACKEND=db+sqlite:///results.sqlite CELERY_TASK_SERIALIZER=json +# Social login settings +SOCIAL_AUTH_GITHUB_KEY= +SOCIAL_AUTH_GITHUB_SECRET= + +# Email backend settings +EMAIL_HOST_USER= +EMAIL_HOST_PASSWORD= diff --git a/setup.sh b/setup.sh index 6c6b64e..06dcf7d 100755 --- a/setup.sh +++ b/setup.sh @@ -21,6 +21,16 @@ export SECRET_KEY=$(\ docker run -it --rm "${DOCKER_BACKEND_IMAGE}" \ python3 -c 'from django.core.management.utils import get_random_secret_key; print(get_random_secret_key())' | tr -d '[:space:]') +# Set Docker Runtime: runc | nvidia +# The current implementation only support 'nvidia' runtime +# Notice that to use GPUs you need to use the 'nvidia' +# DOCKER_RUNTIME *and* use a GPU-enabled pylibs container image. +DOCKER_RUNTIME=runc +if [[ "${EDDL_WITH_CUDA}" == "True" || "${EDDL_WITH_CUDA}" == "true" ]]; then + DOCKER_RUNTIME=nvidia +fi +export DOCKER_RUNTIME + # generate config file from settings envsubst < config.template > .config @@ -45,10 +55,7 @@ fi # build images docker-compose build \ --build-arg DOCKER_LIBS_IMAGE="${DOCKER_LIBS_IMAGE:-}" \ - --build-arg DATASETS_DIR="${DATASETS_DIR:-}" \ - --build-arg TRAINING_DIR="${TRAINING_DIR:-}" \ - --build-arg INFERENCE_DIR="${INFERENCE_DIR:-}" - + --build-arg DATA_DIR="${DATA_DIR:-}" # clean up source #rm -rf docker/src