
Merge pull request #381 from kikkomep/chore/increase-timeouts
feat: enable the capability to configure Gunicorn
kikkomep authored Feb 29, 2024
2 parents b826751 + 73aaf15 commit c9ca4d8
Showing 6 changed files with 228 additions and 112 deletions.
33 changes: 27 additions & 6 deletions docker/lm_entrypoint.sh
@@ -23,11 +23,32 @@ else
mkdir -p ${metrics_base_path}
export PROMETHEUS_MULTIPROC_DIR=$(mktemp -d ${metrics_base_path}/backend.XXXXXXXX)
fi

# gunicorn settings
export GUNICORN_SERVER="true"
gunicorn --workers "${GUNICORN_WORKERS}" \
--threads "${GUNICORN_THREADS}" \
--config "${GUNICORN_CONF}" \
--certfile="${CERT}" --keyfile="${KEY}" \
-b "0.0.0.0:8000" \
"app"
export GUNICORN_WORKERS="${GUNICORN_WORKERS:-2}"
export GUNICORN_THREADS="${GUNICORN_THREADS:-1}"
export GUNICORN_WORKER_CLASS="${GUNICORN_WORKER_CLASS:-sync}"
export GUNICORN_MAX_REQUESTS="${GUNICORN_MAX_REQUESTS:-0}"
export GUNICORN_MAX_REQUESTS_JITTER="${GUNICORN_MAX_REQUESTS_JITTER:-0}"
export GUNICORN_WORKER_CONNECTIONS="${GUNICORN_WORKER_CONNECTIONS:-1000}"
export GUNICORN_TIMEOUT="${GUNICORN_TIMEOUT:-30}"
export GUNICORN_GRACEFUL_TIMEOUT="${GUNICORN_GRACEFUL_TIMEOUT:-30}"
export GUNICORN_KEEPALIVE="${GUNICORN_KEEPALIVE:-2}"

# run app with gunicorn
printf "Starting app in PROD mode (Gunicorn)"
gunicorn --workers "${GUNICORN_WORKERS}" \
--threads "${GUNICORN_THREADS}" \
--max-requests "${GUNICORN_MAX_REQUESTS}" \
--max-requests-jitter "${GUNICORN_MAX_REQUESTS_JITTER}" \
--worker-connections "${GUNICORN_WORKER_CONNECTIONS}" \
--worker-class "${GUNICORN_WORKER_CLASS}" \
--timeout "${GUNICORN_TIMEOUT}" \
--graceful-timeout "${GUNICORN_GRACEFUL_TIMEOUT}" \
--keep-alive "${GUNICORN_KEEPALIVE}" \
--config "${GUNICORN_CONF}" \
--certfile="${CERT}" --keyfile="${KEY}" \
-b "0.0.0.0:8000" \
"app"
fi
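The entrypoint now derives every Gunicorn flag from an environment variable with a sensible default, so the server can be tuned without rebuilding the image. As a rough sketch of how the new knobs can be overridden at container start (the image name below is a placeholder, not taken from this change, and required variables such as the TLS certificate paths and GUNICORN_CONF are omitted for brevity):

    # hypothetical image reference; substitute the real LifeMonitor backend image
    docker run -d \
      -e GUNICORN_WORKERS=4 \
      -e GUNICORN_THREADS=2 \
      -e GUNICORN_TIMEOUT=120 \
      -e GUNICORN_GRACEFUL_TIMEOUT=120 \
      -e GUNICORN_KEEPALIVE=5 \
      -p 8000:8000 \
      example/lifemonitor-backend:latest

Any variable left unset falls back to the default exported in the entrypoint (for example, two workers and a 30-second timeout).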
16 changes: 16 additions & 0 deletions k8s/templates/_helpers.tpl
@@ -135,6 +135,22 @@ Define environment variables shared by some pods.
value: "{{ .Values.worker.processes }}"
- name: WORKER_THREADS
value: "{{ .Values.worker.threads }}"
- name: GUNICORN_WORKERS
value: "{{ .Values.lifemonitor.gunicorn.workers }}"
- name: GUNICORN_THREADS
value: "{{ .Values.lifemonitor.gunicorn.threads }}"
- name: GUNICORN_MAX_REQUESTS
value: "{{ .Values.lifemonitor.gunicorn.max_requests }}"
- name: GUNICORN_MAX_REQUESTS_JITTER
value: "{{ .Values.lifemonitor.gunicorn.max_requests_jitter }}"
- name: GUNICORN_WORKER_CONNECTIONS
value: "{{ .Values.lifemonitor.gunicorn.worker_connections }}"
- name: GUNICORN_GRACEFUL_TIMEOUT
value: "{{ .Values.lifemonitor.gunicorn.graceful_timeout }}"
- name: GUNICORN_TIMEOUT
value: "{{ .Values.lifemonitor.gunicorn.timeout }}"
- name: GUNICORN_KEEPALIVE
value: "{{ .Values.lifemonitor.gunicorn.keepalive }}"
- name: LIFEMONITOR_TLS_KEY
value: "/lm/certs/tls.key"
- name: LIFEMONITOR_TLS_CERT
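With the chart helper now exporting the GUNICORN_* variables, each backend pod should receive them in its environment. A quick sanity check after deployment (the deployment name below is assumed to follow the <release>-backend pattern used elsewhere in the chart; adjust it to the actual resource name):

    # list the Gunicorn-related variables injected into the backend pod
    kubectl exec deploy/lifemonitor-backend -- env | grep '^GUNICORN_'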
254 changes: 150 additions & 104 deletions k8s/templates/nginx-configmap.yaml
@@ -1,111 +1,157 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: lifemonitor-nginx-configmap
labels:
app.kubernetes.io/name: {{ include "chart.name" . }}
helm.sh/chart: {{ include "chart.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
name: lifemonitor-nginx-configmap
labels:
app.kubernetes.io/name: { { include "chart.name" . } }
helm.sh/chart: { { include "chart.chart" . } }
app.kubernetes.io/instance: { { .Release.Name } }
app.kubernetes.io/managed-by: { { .Release.Service } }
data:
server-block.conf: |-
# set upstream server
upstream lm_app {
# fail_timeout=0 means we always retry an upstream even if it failed
# to return a good HTTP response
server {{ include "chart.fullname" . }}-backend:8000 fail_timeout=0;
server-block.conf: |-
# set upstream server
upstream lm_app {
# fail_timeout=0 means we always retry an upstream even if it failed
# to return a good HTTP response
server {{ include "chart.fullname" . }}-backend:8000 fail_timeout=0;
}
{{- if .Values.rateLimiting.zone.accounts.enabled }}
# Define Rate Limiting Zones
limit_req_zone $binary_remote_addr zone=api_accounts:{{ .Values.rateLimiting.zone.accounts.size }} rate={{ .Values.rateLimiting.zone.accounts.rate }};
{{- end }}
server {
listen 0.0.0.0:8080 default_server;
# set the correct host(s) for your site
server_name localhost;
#ssl_certificate /nginx/certs/lm.crt;
#ssl_certificate_key /nginx/certs/lm.key;
# force HTTP traffic to HTTPS
error_page 497 https://$http_host$request_uri;
# define error pages
error_page 404 /error/404;
error_page 429 /error/429;
error_page 500 /error/500;
error_page 502 /error/502;
# location for error pages
location ~ ^/error {
# disable redirects
proxy_redirect off;
# rewrite headers
proxy_pass_header Server;
proxy_set_header X-Real-IP $http_x_forwarded_for;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header Host $http_host;
proxy_set_header Cookie $http_cookie;
# set upstream
proxy_pass https://lm_app;
}
{{- if .Values.rateLimiting.zone.accounts.enabled }}
# Define Rate Limiting Zones
limit_req_zone $binary_remote_addr zone=api_accounts:{{ .Values.rateLimiting.zone.accounts.size }} rate={{ .Values.rateLimiting.zone.accounts.rate }};
{{- end }}
server {
listen 0.0.0.0:8080 default_server;
client_max_body_size 4G;
# set the correct host(s) for your site
server_name localhost;
keepalive_timeout 60;
#ssl_certificate /nginx/certs/lm.crt;
#ssl_certificate_key /nginx/certs/lm.key;
# force HTTP traffic to HTTPS
error_page 497 https://$http_host$request_uri;
# define error pages
error_page 404 /error/404;
error_page 429 /error/429;
error_page 500 /error/500;
error_page 502 /error/502;
# location for error pages
location ~ ^/error {
# disable redirects
proxy_redirect off;
# rewrite headers
proxy_pass_header Server;
proxy_set_header X-Real-IP $http_x_forwarded_for;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header Host $http_host;
proxy_set_header Cookie $http_cookie;
# various proxy settings
proxy_connect_timeout 600;
proxy_read_timeout 600;
proxy_send_timeout 600;
#proxy_intercept_errors on;
# set upstream
proxy_pass https://lm_app;
}
# set static files location
location /static/ {
root /app/lifemonitor;
}
# if the path matches to root, redirect to the account page
location = / {
return 301 https://{{ .Values.externalServerName }}/account/;
}
location ~ ^/account {
# disable redirects
proxy_redirect off;
# rewrite headers
proxy_pass_header Server;
proxy_set_header X-Real-IP $http_x_forwarded_for;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header Host $http_host;
proxy_set_header Cookie $http_cookie;
# various proxy settings
proxy_connect_timeout 600;
proxy_read_timeout 600;
proxy_send_timeout 600;
#proxy_intercept_errors on;
# set upstream
proxy_pass https://lm_app;
{{ include "lifemonitor.api.rateLimiting" . | indent 12 }}
}
# set proxy location
location / {
#resolver 127.0.0.11 ipv6=off valid=30s;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
proxy_pass https://lm_app;
}
# set static files location
location /static/ {
root /app/lifemonitor;
}
# if the path matches to root, redirect to the account page
location = / {
return 301 https://{{ .Values.externalServerName }}/account/;
}
location ~ ^/account {
# disable redirects
proxy_redirect off;
# rewrite headers
proxy_pass_header Server;
proxy_set_header X-Real-IP $http_x_forwarded_for;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header Host $http_host;
proxy_set_header Cookie $http_cookie;
# set upstream
proxy_pass https://lm_app;
{{ include "lifemonitor.api.rateLimiting" . | indent 12 }}
}
# set proxy location
location / {
#resolver 127.0.0.11 ipv6=off valid=30s;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
proxy_pass https://lm_app;
}
}
nginx.conf: |-
# logs
pid /var/log/nginx/nginx.pid;
error_log /var/log/nginx/nginx.error.log warn;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
# Enables or disables the use of underscores in client request header fields.
# When the use of underscores is disabled, request header fields whose names contain underscores are marked as invalid and become subject to the ignore_invalid_headers directive.
# underscores_in_headers off;
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 128;
# Configure Log files
# access_log /var/log/nginx/access.log custom_format;
error_log /var/log/nginx/error.log warn;
# See Move default writable paths to a dedicated directory (#119)
# https://github.com/openresty/docker-openresty/issues/119
client_body_temp_path /var/run/nginx/nginx-client-body;
proxy_temp_path /var/run/nginx/nginx-proxy;
fastcgi_temp_path /var/run/nginx/nginx-fastcgi;
uwsgi_temp_path /var/run/nginx/nginx-uwsgi;
scgi_temp_path /var/run/nginx/nginx-scgi;
# Increase the buffer size
proxy_buffers 8 16k;
proxy_buffer_size 32k;
# various proxy settings
proxy_connect_timeout 180s;
proxy_read_timeout 180s;
proxy_send_timeout 180s;
keepalive_timeout 180s;
fastcgi_send_timeout 180s;
fastcgi_read_timeout 180s;
sendfile on;
#tcp_nopush on;
#gzip on;
include /etc/nginx/conf.d/*.conf;
# Don't reveal OpenResty version to clients.
# server_tokens off;
}
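The rewritten nginx ConfigMap raises the proxy timeouts (600 s for the error and /account locations, 180 s globally), which only pays off if Gunicorn's own GUNICORN_TIMEOUT is raised accordingly; otherwise workers are still killed before nginx stops waiting. A quick way to inspect the rendered configuration before deploying (release name and chart path are assumptions based on the repository layout, and the chart's required values must be satisfied):

    # render the chart locally and show the proxy timeout directives
    helm template lifemonitor ./k8s | grep -E 'proxy_(connect|read|send)_timeout'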
10 changes: 8 additions & 2 deletions k8s/templates/settings.secret.yaml
@@ -101,8 +101,14 @@ stringData:
LIFEMONITOR_ADMIN_PASSWORD={{ .Values.lifemonitor.administrator.password }}
# Gunicorn settings
GUNICORN_WORKERS=1
GUNICORN_THREADS=2
GUNICORN_WORKERS={{ .Values.lifemonitor.gunicorn.workers | default 2 }}
GUNICORN_THREADS={{ .Values.lifemonitor.gunicorn.threads | default 4 }}
GUNICORN_WORKER_CONNECTIONS={{ .Values.lifemonitor.gunicorn.worker_connections | default 1000 }}
GUNICORN_MAX_REQUESTS={{ .Values.lifemonitor.gunicorn.max_requests | default 0 }}
GUNICORN_MAX_REQUESTS_JITTER={{ .Values.lifemonitor.gunicorn.max_requests_jitter | default 0 }}
GUNICORN_TIMEOUT={{ .Values.lifemonitor.gunicorn.timeout | default 30 }}
GUNICORN_GRACEFUL_TIMEOUT={{ .Values.lifemonitor.gunicorn.graceful_timeout | default 30 }}
GUNICORN_KEEP_ALIVE={{ .Values.lifemonitor.gunicorn.keep_alive | default 2 }}
# Set a warning message (displayed in the login screen and the user's profile page)
WARNING_MESSAGE={{- .Values.lifemonitor.warning_message | default "" }}
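In settings.secret.yaml the hard-coded worker and thread counts are replaced by chart values with `| default` fallbacks, so the secret still renders when the gunicorn block is omitted from values.yaml. To see exactly what ends up in the secret before applying it (same assumed release name and chart path as above):

    # render only the settings secret and check the Gunicorn lines
    helm template lifemonitor ./k8s --show-only templates/settings.secret.yaml | grep GUNICORN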
21 changes: 21 additions & 0 deletions k8s/values.yaml
@@ -234,6 +234,27 @@ lifemonitor:
cpu: 0.5
memory: 1024Mi

# gunicorn settings
gunicorn:
# The number of worker processes for handling requests.
# A positive integer generally in the 2-4 x $(NUM_CORES) range.
# You’ll want to vary this a bit to find the best for your particular application’s work load.
workers: 2
# The number of worker threads for handling requests.
# A positive integer generally in the 2-4 x $(NUM_CORES) range.
# You’ll want to vary this a bit to find the best for your particular application’s work load.
threads: 1
# The maximum number of simultaneous clients.
worker_connections: 1000
# The maximum number of requests a worker will process before restarting.
max_requests: 0 # (0 = unlimited)
# The maximum jitter to add to the max_requests setting.
max_requests_jitter: 0 # (0 = no jitter)
# Workers silent for more than this many seconds are killed and restarted.
timeout: 30
# The number of seconds to wait for requests on a Keep-Alive connection.
keepalive: 2

# configure resources for the init containers
initContainers:
initBackend:
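The new lifemonitor.gunicorn block in values.yaml supplies the chart defaults; individual settings can be overridden per deployment without editing the chart. A minimal sketch (release name and chart reference are assumptions):

    # bump workers and timeouts for a busier deployment
    helm upgrade lifemonitor ./k8s --reuse-values \
      --set lifemonitor.gunicorn.workers=4 \
      --set lifemonitor.gunicorn.timeout=120 \
      --set lifemonitor.gunicorn.worker_connections=2000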
6 changes: 6 additions & 0 deletions settings.conf
@@ -61,6 +61,12 @@ POSTGRESQL_PASSWORD=foobar
# Gunicorn settings
GUNICORN_WORKERS=1
GUNICORN_THREADS=2
GUNICORN_MAX_REQUESTS=0
GUNICORN_MAX_REQUESTS_JITTER=0
GUNICORN_WORKER_CONNECTIONS=1000
GUNICORN_TIMEOUT=30
GUNICORN_GRACEFUL_TIMEOUT=30
GUNICORN_KEEP_ALIVE=2

# Dramatiq worker settings
WORKER_PROCESSES=1
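The repository-level settings.conf mirrors the same defaults for non-Kubernetes setups. For the worker count, the "2-4 x $(NUM_CORES)" guidance quoted in the values.yaml comments can be turned into a concrete number on the host; a purely illustrative sketch, not part of this change:

    # derive a worker count from the available cores (common 2*cores+1 heuristic)
    NUM_CORES=$(nproc)
    GUNICORN_WORKERS=$(( 2 * NUM_CORES + 1 ))
    echo "GUNICORN_WORKERS=${GUNICORN_WORKERS}"   # e.g. 9 on a 4-core host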
