diff --git a/.gitignore b/.gitignore
index f24b757..7cf758e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
secrets.env
+*.env
+.history
+
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index b2cca57..0000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,77 +0,0 @@
-variables:
- SSL_CERT_DIR: ""
- # DOCKER_HOST: tcp://docker:2375
- DOCKER_TLS_CERTDIR: ""
-
-stages:
- - debug
- - setup
- - staging
- - production
-
-debug:
- stage: debug
- image: alpine
- script:
- - export
-
-setup:
- stage: setup
- image: docker
- services:
- - name: docker:19.03.0-dind
- script:
- - docker login "$CI_REGISTRY" -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
-
-dry.run.local:
- stage: dry.run.local
- image: praqma/helmsman:v3.5.1
- script:
- - helmsman --debug --group staging -f helmsman.yaml -f helmsman/staging.yaml --dry-run
- only:
- - staging
-
-
-dryrun:
- stage: dryrun
- image: praqma/helmsman:v3.5.1
- script:
- - helmsman --debug --group staging -f helmsman/token.yaml -f helmsman.yaml -f helmsman/staging.yaml --dry-run
- only:
- - staging
-
-staging:
- stage: staging
- image: praqma/helmsman:v3.5.1
- script:
- - helmsman --apply --debug --group staging -f helmsman/token.yaml -f helmsman.yaml -f helmsman/staging.yaml
- only:
- - staging
-
-staging.local:
- stage: staging
- image: praqma/helmsman:v3.5.1
- script:
- - helmsman --apply --debug --group staging -f helmsman.yaml -f helmsman/staging.yaml
- only:
- - staging
-
-
-production:
- stage: production
- image: praqma/helmsman:v3.5.1
- script:
- - helmsman --apply --debug --group production -f helmsman/token.yaml -f helmsman.yaml helmsman/production.yaml
- only:
- - master
-
-
-# testing:
-# stage: testing
-# image: python
-# script:
-# - pip install pytest
-# - git clone https://github.com/jupyterhub/jupyterhub
-# - pytest
-# except:
-# - staging
diff --git a/Makefile b/Makefile
index 7d890e4..3fcaa10 100644
--- a/Makefile
+++ b/Makefile
@@ -1,120 +1,120 @@
#!make
BASEDIR = $(shell pwd)
-SOURCE:=$(shell source secrets.env)
+SOURCE:=$(shell source .env && source secrets.env)
-# include secrets.env
-# export
-deploy.production:
- helmsman --apply --debug --group production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/binder.yaml
-deploy.gpu.production:
- helmsman --apply --debug --group production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml
+deploy.%:
+ helmsman --apply --debug --group $* -f helmsman.yaml -f helmsman/$*.yaml -f helmsman/binder.yaml
-deploy.denbi.production:
- helmsman --apply --debug --group production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/denbi.yaml
+deploy.gpu.prod:
+ helmsman --apply --debug --group prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --subst-env-values --subst-ssm-values
+deploy.denbi.prod:
+ helmsman --apply --debug --group prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/denbi.yaml
-deploy.staging:
- helmsman --apply --debug --group staging -f helmsman.yaml -f helmsman/staging.yaml -f helmsman/binder.yaml
+
+deploy.dev:
+ helmsman --apply --debug --group dev -f helmsman.yaml -f helmsman/dev.yaml -f helmsman/binder.yaml
ci.deploy.prod:
- helmsman --apply --debug --group production -f helmsman/token.yaml -f helmsman.yaml -f helmsman/production.yaml
-ci.deploy.staging:
- helmsman --apply --debug --group staging -f helmsman/token.yaml -f helmsman.yaml -f helmsman/staging.yaml
+ helmsman --apply --debug --group prod -f helmsman/token.yaml -f helmsman.yaml -f helmsman/prod.yaml
+ci.deploy.dev:
+ helmsman --apply --debug --group dev -f helmsman/token.yaml -f helmsman.yaml -f helmsman/dev.yaml
binder.deploy.prod:
- helmsman --apply --debug --target binderhub-production -f helmsman.yaml -f helmsman/production.yaml --always-upgrade
-binder.deploy.staging:
- helmsman --apply --debug --target binderhub-production -f helmsman.yaml --always-upgrade
+ helmsman --apply --debug --target binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml --always-upgrade
+binder.deploy.dev:
+ helmsman --apply --debug --target binderhub-prod -f helmsman.yaml --always-upgrade
# Beta gpu enabled service
beta.binder.deploy.prod:
- helmsman --apply --debug --target binderhub-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade
-beta.binder.deploy.staging:
- helmsman --apply --debug --target binderhub-production -f helmsman.yaml -f helmsman/gpu.yaml --always-upgrade
+ helmsman --apply --debug --target binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade
+beta.binder.deploy.dev:
+ helmsman --apply --debug --target binderhub-prod -f helmsman.yaml -f helmsman/gpu.yaml --always-upgrade
# Alpha openstack service
persistent.alpha.binder.deploy.prod:
- helmsman --apply --debug --target persistent-binderhub-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
-persistent.alpha.binder.deploy.staging:
- helmsman --apply --debug --target persistent-binderhub-production -f helmsman.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --target persistent-binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
+persistent.alpha.binder.deploy.dev:
+ helmsman --apply --debug --target persistent-binderhub-prod -f helmsman.yaml -f helmsman/openstack.yaml --always-upgrade
#
alpha.binder.deploy.prod:
- helmsman --apply --debug --target binderhub-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
-alpha.binder.deploy.staging:
- helmsman --apply --debug --target binderhub-staging -f helmsman.yaml -f helmsman/staging.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --target binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
+alpha.binder.deploy.dev:
+ helmsman --apply --debug --target binderhub-dev -f helmsman.yaml -f helmsman/dev.yaml -f helmsman/openstack.yaml --always-upgrade
alpha.deploy.prod:
- helmsman --apply --debug --group production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --group prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
alpha.cert.deploy.prod:
- helmsman --apply --debug --target cert-manager-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --target cert-manager-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
alpha.trow.deploy.prod:
- helmsman --apply --debug --target trow -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --target trow -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
alpha.registry.deploy.prod:
- helmsman --apply --debug --target docker-registry -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --target docker-registry -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
alpha.ingress.deploy.prod:
- helmsman --apply --debug --target ingress-nginx -f helmsman.yaml -f helmsman/production.yaml -f helmsman/openstack.yaml --always-upgrade
+ helmsman --apply --debug --target ingress-nginx -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/openstack.yaml --always-upgrade
# Beta gpu enabled service
gpu.beta.binder.deploy.prod:
- helmsman --apply --debug --target binderhub-production-gpu -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade
-gpu.beta.binder.deploy.staging:
- helmsman --apply --debug --target binderhub-production-gpu -f helmsman.yaml -f helmsman/gpu.yaml --always-upgrade
+ helmsman --apply --debug --target binderhub-prod-gpu -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade
+gpu.beta.binder.deploy.dev:
+ helmsman --apply --debug --target binderhub-prod-gpu -f helmsman.yaml -f helmsman/gpu.yaml --always-upgrade
gpu.beta.triton.deploy.prod:
- helmsman --apply --debug --target tritoninferenceserver -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade
+ helmsman --apply --debug --target tritoninferenceserver -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade
# Dry runs for testing
binder.deploy.prod.dry:
- helmsman --debug --target binderhub-production -f helmsman.yaml -f helmsman/production.yaml --always-upgrade --dry-run
+ helmsman --debug --target binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml --always-upgrade --dry-run
beta.binder.deploy.prod.dry:
- helmsman --debug --target binderhub-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade --dry-run
+ helmsman --debug --target binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade --dry-run
gpu.beta.binder.deploy.prod.dry:
- helmsman --debug --target binderhub-production-gpu -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade --dry-run
+ helmsman --debug --target binderhub-prod-gpu -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade --dry-run
gpu.cert.deploy.prod:
- helmsman --apply --debug --target cert-manager-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade
+ helmsman --apply --debug --target cert-manager-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade
-gpu.nvdp.production:
- helmsman --apply --debug --target nvidia-gpu-operator -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gpu.yaml --always-upgrade
+gpu.nvdp.prod:
+ helmsman --apply --debug --target nvidia-gpu-operator -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gpu.yaml --always-upgrade
gke.binder.deploy.prod:
- helmsman --apply --debug --target binderhub-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
+ helmsman --apply --debug --target binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
gke.binder.deploy.nginx:
- helmsman --apply --debug --target ingress-nginx -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
-gke.binder.deploy.staging:
- helmsman --apply --debug --target binderhub-staging -f helmsman.yaml -f helmsman/staging.yaml -f helmsman/gke.yaml --always-upgrade
+ helmsman --apply --debug --target ingress-nginx -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
+gke.binder.deploy.dev:
+ helmsman --apply --debug --target binderhub-dev -f helmsman.yaml -f helmsman/dev.yaml -f helmsman/gke.yaml --always-upgrade
-gke.nvdp.production:
- helmsman --apply --debug --target nvidia-device-plugin -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
+gke.nvdp.prod:
+ helmsman --apply --debug --target nvidia-device-plugin -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
gke.deploy.prod:
- helmsman --apply --debug --group production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
+ helmsman --apply --debug --group prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
gke.triton.deploy.prod:
- helmsman --apply --debug --target tritoninferenceserver -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
+ helmsman --apply --debug --target tritoninferenceserver -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
gke.prometheus.deploy.prod:
- helmsman --apply --debug --target prometheus -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
+ helmsman --apply --debug --target prometheus -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
gke.persistent.alpha.binder.deploy.prod:
- helmsman --apply --debug --target persistent-binderhub-production -f helmsman.yaml -f helmsman/production.yaml -f helmsman/gke.yaml --always-upgrade
-gke.persistent.alpha.binder.deploy.staging:
- helmsman --apply --debug --target persistent-binderhub-production -f helmsman.yaml -f helmsman/gke.yaml --always-upgrade
+ helmsman --apply --debug --target persistent-binderhub-prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/gke.yaml --always-upgrade
+gke.persistent.alpha.binder.deploy.dev:
+ helmsman --apply --debug --target persistent-binderhub-prod -f helmsman.yaml -f helmsman/gke.yaml --always-upgrade
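A quick usage sketch for the parametric `deploy.%` target introduced above (a Make pattern rule, so the stem after `deploy.` becomes both the Helmsman group and the values-file name; explicit targets such as `deploy.dev` still take precedence over the pattern):

```sh
# Expands to: helmsman --apply --debug --group prod -f helmsman.yaml -f helmsman/prod.yaml -f helmsman/binder.yaml
make deploy.prod

# The explicit deploy.dev rule wins over the pattern rule for this target
make deploy.dev
```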
diff --git a/README.md b/README.md
index ab8db7e..c37b1d7 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ This will give the GitLab runner almost full administrative access to the cluste
bash gitlab-ci/install-helm.sh
helmfile -e minikube sync
-For production
+For prod
helmfile -e default sync -->
diff --git a/binderhub-persistent/production/config.yaml b/binderhub-persistent/production/config.yaml
deleted file mode 100644
index e69de29..0000000
diff --git a/binderhub-persistent/staging/config.yaml b/binderhub-persistent/staging/config.yaml
deleted file mode 100644
index e69de29..0000000
diff --git a/binderhub/config.yaml b/binderhub/config.yaml
index 87c2651..0425af2 100644
--- a/binderhub/config.yaml
+++ b/binderhub/config.yaml
@@ -28,12 +28,6 @@ config:
banner_message: |
This is a public Beta and liable to downtime
- # build_image: "aicrowd/repo2docker"
- # build_image: "aicrowd/repo2docker:latest"
- # base_url: "/staging" #Doesn't work
- # networkPolicy:
- # enabled: true
-# # Good until this:
jupyterhub:
proxy:
@@ -130,12 +124,20 @@ jupyterhub:
# cmd: start-singleuser.sh
# cmd: null
cpu:
- limit: 2
- guarantee: 1
+ limit: 4
+ guarantee: 2
memory:
- limit: 6G
- guarantee: 1G
- # storage:
+ limit: 8G
+ guarantee: 6G
+ storage:
+ extraVolumes:
+ - name: biostudies-nfs-binder
+ persistentVolumeClaim:
+ claimName: biostudies-nfs-pvc-binder
+ extraVolumeMounts:
+ - name: biostudies-nfs-binder
+ mountPath: /home/jovyan/biostudies
+ readOnly: true
# type: none
# extraVolumes:
# - name: nfs-bs-ftp
@@ -151,11 +153,35 @@ jupyterhub:
# mountPath: /home/jovyan/biostudies
# readOnly: true
profileList:
- - display_name: "GPU Server"
- description: "Spawns a notebook server with access to a GPU"
+ # - display_name: "GPU Server"
+ # description: "Spawns a notebook server with access to a GPU"
+ # kubespawner_override:
+ # extra_resource_limits:
+ # smarter-devices/fuse: "1"
+ - display_name: "BioStudies"
+ description: "Spawns a notebook server BioStudies attached"
+ slug: "Biostudies"
kubespawner_override:
extra_resource_limits:
smarter-devices/fuse: "1"
+ # image: cschranz/gpu-jupyter
+ # cpu_limit: 32
+ # cpu_guarantee: 16
+ # mem_limit: 32G
+ # mem_guarantee: 32G
+ # volume_mounts:
+ # - name: biostudies-nfs
+ # mountPath: /home/jovyan/biostudies
+ storage:
+ # type: none
+ extraVolumes:
+ - name: biostudies-nfs-binder
+ persistentVolumeClaim:
+ claimName: biostudies-nfs-pvc-binder
+ extraVolumeMounts:
+ - name: biostudies-nfs-binder
+ mountPath: /home/jovyan/biostudies
+ readOnly: true
# - display_name: "No GPU"
# description: "Spawns a notebook server with access to a GPU"
# proxy:
@@ -193,16 +219,7 @@ extraVolumes:
extraVolumeMounts:
- name: custom-templates
mountPath: /etc/binderhub/custom
-
-dind:
- enabled: true
- resources:
- requests:
- cpu: "2"
- memory: 4Gi
- limits:
- cpu: "4"
- memory: 6Gi
+imageBuilderType: dind
ingress:
enabled: true
@@ -213,7 +230,7 @@ ingress:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.k8s.io/acme-challenge-type: http01
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
certmanager.k8s.io/acme-http01-edit-in-place: "true"
# https:
# enabled: true
diff --git a/binderhub/persistentVolumes/biostudies.yaml b/binderhub/persistentVolumes/biostudies.yaml
index 5f87472..21cae22 100644
--- a/binderhub/persistentVolumes/biostudies.yaml
+++ b/binderhub/persistentVolumes/biostudies.yaml
@@ -1,18 +1,21 @@
apiVersion: v1
kind: PersistentVolume
metadata:
- name: nfs-bs-ftp-pv-binderhub
+ name: biostudies-nfs-binder
spec:
storageClassName: nfs
capacity:
storage: 1Gi
accessModes:
- ReadOnlyMany
+ mountOptions:
+ - nfsvers=3
nfs:
# path: /tmp
# server: 172.17.0.2
- server: 10.35.105.251 # Change this!
- path: "/fg_biostudies/.adm/databases/prod/submission/ftp/"
+ # server: 10.35.105.251 # Change this!
+ server: 10.35.198.252
+ path: "/fg_biostudies/.adm/databases/prod/submissions/ftp/"
readOnly: true
# claimRef:
# name: nfs-bs-ftp-pvc
@@ -22,7 +25,7 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: "nfs-bs-ftp-pvc"
+ name: biostudies-nfs-pvc-binder
spec:
storageClassName: nfs
accessModes:
@@ -30,4 +33,4 @@ spec:
resources:
requests:
storage: 1Gi
- volumeName: "nfs-bs-ftp-pv-binderhub"
+ volumeName: biostudies-nfs-binder
diff --git a/binderhub/production/config.yaml b/binderhub/production/config.yaml
index 7e2100d..7f757b3 100644
--- a/binderhub/production/config.yaml
+++ b/binderhub/production/config.yaml
@@ -38,7 +38,7 @@ ingress:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.k8s.io/acme-challenge-type: http01
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
# https:
# enabled: true
# type: nginx
@@ -48,5 +48,5 @@ ingress:
- "binder.bioimagearchive.org"
dind:
# enabled: true
- hostLibDir: /var/lib/dind/production
- hostSocketDir: /var/run/dind/production
\ No newline at end of file
+ hostLibDir: /var/lib/dind/prod
+ hostSocketDir: /var/run/dind/prod
\ No newline at end of file
diff --git a/binderhub/production/config_beta_GKE.yaml b/binderhub/production/config_beta_GKE.yaml
index 1dea5b5..759368a 100644
--- a/binderhub/production/config_beta_GKE.yaml
+++ b/binderhub/production/config_beta_GKE.yaml
@@ -2,8 +2,8 @@
replicas: 1
dind:
enabled: true
- hostLibDir: /var/lib/dind/gke/production
- hostSocketDir: /var/run/dind/gke/production
+ hostLibDir: /var/lib/dind/gke/prod
+ hostSocketDir: /var/run/dind/gke/prod
resources:
requests:
cpu: "0.5"
@@ -44,7 +44,7 @@ jupyterhub:
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
- # cert-manager.io/cluster-issuer: letsencrypt-production
+ # cert-manager.io/cluster-issuer: letsencrypt-prod
# # cert-manager.io/issue-temporary-certificate: "true"
# # acme.cert-manager.io/http01-edit-in-place: "true"
# cert-manager.k8s.io/acme-challenge-type: http01
@@ -93,7 +93,7 @@ ingress:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.k8s.io/acme-challenge-type: http01
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
# tls:
# # - secretName: binder-bioimagearchive-org-cert
# # hosts:
diff --git a/binderhub/production/config_beta_openstack.yaml b/binderhub/production/config_beta_openstack.yaml
index 11bca07..a8d8d05 100644
--- a/binderhub/production/config_beta_openstack.yaml
+++ b/binderhub/production/config_beta_openstack.yaml
@@ -30,7 +30,7 @@ jupyterhub:
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
- # cert-manager.io/cluster-issuer: letsencrypt-production
+ # cert-manager.io/cluster-issuer: letsencrypt-prod
# # cert-manager.io/issue-temporary-certificate: "true"
# # acme.cert-manager.io/http01-edit-in-place: "true"
# cert-manager.k8s.io/acme-challenge-type: http01
@@ -66,7 +66,7 @@ ingress:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.k8s.io/acme-challenge-type: http01
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
# tls:
# # - secretName: binder-bioimagearchive-org-cert
# # hosts:
diff --git a/binderhub/staging.yaml b/binderhub/staging.yaml
index 02af371..1acf26d 100644
--- a/binderhub/staging.yaml
+++ b/binderhub/staging.yaml
@@ -1,4 +1,4 @@
config:
BinderHub:
# hub_url: http://10.43.114.27
- hub_url: http://45.86.170.228/hub-staging/
\ No newline at end of file
+ hub_url: http://45.86.170.228/hub-dev/
\ No newline at end of file
diff --git a/binderhub/staging/config.yaml b/binderhub/staging/config.yaml
index c879fe5..e876c2d 100644
--- a/binderhub/staging/config.yaml
+++ b/binderhub/staging/config.yaml
@@ -1,6 +1,6 @@
config:
BinderHub:
- hub_url: http://staging.binder.bioimagearchive.org/binderhub/
+ hub_url: http://dev.binder.bioimagearchive.org/binderhub/
jupyterhub:
hub:
@@ -8,17 +8,17 @@ jupyterhub:
ingress:
enabled: true
hosts:
- - staging.binder.bioimagearchive.org
+ - dev.binder.bioimagearchive.org
ingress:
enabled: true
hosts:
- - "staging.binder.bioimagearchive.org"
+ - "dev.binder.bioimagearchive.org"
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.k8s.io/acme-challenge-type: http01
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
https:
enabled: true
type: nginx
@@ -30,5 +30,5 @@ service:
dind:
- hostLibDir: /var/lib/dind/staging
- hostSocketDir: /var/run/dind/staging
\ No newline at end of file
+ hostLibDir: /var/lib/dind/dev
+ hostSocketDir: /var/run/dind/dev
\ No newline at end of file
diff --git a/binderhub/staging/config_beta.yaml b/binderhub/staging/config_beta.yaml
index 0f6d9b9..430c433 100644
--- a/binderhub/staging/config_beta.yaml
+++ b/binderhub/staging/config_beta.yaml
@@ -1,6 +1,6 @@
config:
BinderHub:
- hub_url: http://staging.beta.binder.bioimagearchive.org/binderhub/
+ hub_url: http://dev.beta.binder.bioimagearchive.org/binderhub/
banner_message: |
Beta service with more RAM and CPU
jupyterhub:
@@ -9,9 +9,9 @@ jupyterhub:
ingress:
enabled: true
hosts:
- - staging.beta.binder.bioimagearchive.org
+ - dev.beta.binder.bioimagearchive.org
ingress:
enabled: true
hosts:
- - "staging.beta.binder.bioimagearchive.org"
\ No newline at end of file
+ - "dev.beta.binder.bioimagearchive.org"
\ No newline at end of file
diff --git a/binderhub/staging/config_beta_GPU.yaml b/binderhub/staging/config_beta_GPU.yaml
index 134885c..0442216 100644
--- a/binderhub/staging/config_beta_GPU.yaml
+++ b/binderhub/staging/config_beta_GPU.yaml
@@ -4,7 +4,7 @@ jupyterhub:
ingress:
enabled: true
hosts:
- - staging.gpu.beta.binder.bioimagearchive.org
+ - dev.gpu.beta.binder.bioimagearchive.org
profileList:
- display_name: "GPU Server"
@@ -17,7 +17,7 @@ jupyterhub:
config:
BinderHub:
- hub_url: http://staging.gpu.beta.binder.bioimagearchive.org/binderhub/
+ hub_url: http://dev.gpu.beta.binder.bioimagearchive.org/binderhub/
banner_message: |
Beta service with more RAM and CPU and GPU Support
@@ -25,4 +25,4 @@ config:
ingress:
enabled: true
hosts:
- - "staging.gpu.beta.binder.bioimagearchive.org"
+ - "dev.gpu.beta.binder.bioimagearchive.org"
diff --git a/cert-managment/cluster_issuer.yaml b/cert-managment/cluster_issuer.yaml
index 2b43d80..939c725 100644
--- a/cert-managment/cluster_issuer.yaml
+++ b/cert-managment/cluster_issuer.yaml
@@ -1,7 +1,7 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
- name: letsencrypt-production
+ name: letsencrypt-prod
namespace: cert-manager
spec:
acme:
@@ -10,10 +10,10 @@ spec:
# certificates, and issues related to your account.
email: ctr26@ebi.ac.uk
server: https://acme-v02.api.letsencrypt.org/directory
- # server: https://acme-staging-v02.api.letsencrypt.org/directory
+ # server: https://acme-staging-v02.api.letsencrypt.org/directory # Let's Encrypt's staging endpoint; the hostname is fixed upstream
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
- name: letsencrypt-production
+ name: letsencrypt-prod
# Add a single challenge solver, HTTP01 using nginx
solvers:
- http01:
diff --git a/docker-registry/values.yaml b/docker-registry/values.yaml
index 92de692..7559eff 100644
--- a/docker-registry/values.yaml
+++ b/docker-registry/values.yaml
@@ -9,7 +9,7 @@ ingress:
# kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.k8s.io/acme-challenge-type: http01
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/proxy-body-size: "0"
tls:
- secretName: docker-alpha-binder-bioimagearchive-org
@@ -34,7 +34,7 @@ ingress:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# cert-manager.k8s.io/acme-challenge-type: http01
-# cert-manager.io/cluster-issuer: letsencrypt-production
+# cert-manager.io/cluster-issuer: letsencrypt-prod
# # https:
# # enabled: true
# # type: nginx
diff --git a/docs/deployment.md b/docs/deployment.md
index 897ccd8..00cf455 100644
--- a/docs/deployment.md
+++ b/docs/deployment.md
@@ -12,19 +12,19 @@ brew install helmsman
Once installed, the full environment can be deployed using
```
-helmsman --apply -f helmsman.yaml -f helmsman-production.yaml
+helmsman --apply -f helmsman.yaml -f helmsman-prod.yaml
```
-This can be limited to only the staging environment say using
+This can be limited to a single group, for example the dev environment, using
```
-helmsman --apply -f helmsman.yaml -f helmsman-production.yaml --group staging
+helmsman --apply -f helmsman.yaml -f helmsman-prod.yaml --group dev
```
Helmsman expects the target cluster's kube context to be named CPU. To deploy to a cluster named, say, GPU, with the necessary GPU overrides for JupyterHub and BinderHub, append `helmsman-gpu.yaml`
```
-helmsman --apply -f helmsman.yaml -f helmsman-production.yaml -f helmsman-gpu.yaml
+helmsman --apply -f helmsman.yaml -f helmsman-prod.yaml -f helmsman-gpu.yaml
```
`helmsman-token.yaml` is reserved for runners that run inside the cluster and deploy to it, as is the case with GitLab CI, which is how the BIA-Binder is deployed.
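For reference, a sketch of that in-cluster CI invocation, mirroring the `ci.deploy.prod` make target (the token file presumably supplies the in-cluster bearer-token kube context for the runner):

```sh
# Run from a GitLab runner pod inside the cluster:
# the token file first (in-cluster context), then the base and prod configs layered on top.
helmsman --apply --debug --group prod \
  -f helmsman/token.yaml \
  -f helmsman.yaml \
  -f helmsman/prod.yaml
```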
diff --git a/gitlab-ci/deploy.sh b/gitlab-ci/deploy.sh
deleted file mode 100644
index 7390bac..0000000
--- a/gitlab-ci/deploy.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-
-set -eu
-
-# Required environment variables:
-test -n "$CI_ENVIRONMENT_NAME"
-APPLICATION_NAME="${CI_ENVIRONMENT_NAME#*-}"
-test -n "$APPLICATION_NAME"
-# Since we're running inside a Kubernetes pod we can use internal kube-dns
-JUPYTERHUB_URL="http://proxy-public.jupyterhub-${APPLICATION_NAME}/${APPLICATION_NAME}"
-
-
-# TODO: Use "apply" instead of "sync" to update only changed charts
-# Waiting for https://github.com/roboll/helmfile/issues/458
-helmfile --selector application=${APPLICATION_NAME} sync
-
-
-# Is this necessary? Or should helm --wait take care of this?
-i=1
-while [ $i -lt 60 ]; do
- sleep 10
- let i++
- if curl --fail -s -L --max-time 2 $JUPYTERHUB_URL/hub/api; then
- break
- fi
- echo "Waited ${i}0 seconds"
-done
-
-# Check the response contains the expected content
-curl --fail -s $JUPYTERHUB_URL/hub/api | grep version
diff --git a/gitlab-ci/install-helm.sh b/gitlab-ci/install-helm.sh
deleted file mode 100644
index f8bb5bb..0000000
--- a/gitlab-ci/install-helm.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# Install and secure access to Helm
-set -eux
-
-echo '{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name": "tiller"}}' | kubectl apply -n kube-system -f -
-echo '{"apiVersion": "rbac.authorization.k8s.io/v1","kind": "ClusterRoleBinding","metadata":{"name":"tiller"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind": "ClusterRole","name": "cluster-admin"},"subjects":[{"kind": "ServiceAccount","name": "tiller","namespace": "kube-system"}]}' | kubectl apply -n kube-system -f -
-
-helm init --upgrade --service-account tiller
-
-# https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/8209ea252a1ec2cdb90640ea2a2735467b4aadbc/doc/source/security.md#secure-access-to-helm
-kubectl --namespace=kube-system patch deployment tiller-deploy --type=json --patch='[{"op": "add", "path": "/spec/template/spec/containers/0/command", "value": ["/tiller", "--listen=localhost:44134"]}]'
-
-echo "waiting for tiller"
-kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy
diff --git a/helmfile.yaml b/helmfile.yaml
index 2d2e1ba..a65116f 100644
--- a/helmfile.yaml
+++ b/helmfile.yaml
@@ -18,7 +18,7 @@ repositories:
environments:
default:
values:
- - production.yaml
+ - prod.yaml
minikube:
values:
- minikube.yaml
@@ -29,19 +29,19 @@ releases:
######################################################################
-# JupyterHub staging
+# JupyterHub dev
- # - name: jupyterhub-staging
+ # - name: jupyterhub-dev
# chart: jupyterhub/jupyterhub
# # Wed, 23 Jan 2019 15:37:58 +0000
# # version: 0.8-d470e4f
- # namespace: jupyterhub-staging
+ # namespace: jupyterhub-dev
# labels:
- # deployment: staging
- # application: staging
+ # deployment: dev
+ # application: dev
# values:
# - values/jupyterhub-config.yaml
- # {{ range .Environment.Values.jupyterhub_staging_overrides }}
+ # {{ range .Environment.Values.jupyterhub_dev_overrides }}
# - {{ . }}
# {{ end }}
# set:
@@ -54,7 +54,7 @@ releases:
# # version: 0.8-d470e4f
# namespace: jupyterhub-sandbox
# labels:
- # deployment: production
+ # deployment: prod
# application: sandbox
# values:
# - values/jupyterhub-config.yaml
@@ -77,7 +77,7 @@ releases:
# version: 0.8-d470e4f
namespace: jupyterhub-github
labels:
- deployment: production
+ deployment: prod
application: github
values:
- values/jupyterhub-config.yaml
diff --git a/helmsman.yaml b/helmsman.yaml
index 9cda42e..7fe774f 100644
--- a/helmsman.yaml
+++ b/helmsman.yaml
@@ -1,28 +1,3 @@
-# context: minikube
-settings:
- kubeContext: bia-binder
- # globalHooks:
- # preUpgrade: "ingress/gpu_redirect.yaml"
-
-# # username = "admin"
-# # password = "$K8S_PASSWORD"
-# # clusterURI = "https://192.168.99.100:8443"
-# settings:
-# # kubeContext: "config" # the name of the context to be created
-# # bearerToken: true
-# # clusterURI: "https://kubernetes.default"
-# # # bearerTokenPath: "/path/to/custom/bearer/token/file"
-# kubeContext: "internal"
-# clusterURI: "https://kubernetes.default"
-# bearerToken: true
-# bearerTokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token" # optional if a token is provided by the user in another location
-
-# settings:
- # globalHooks:
- # preInstall:
- # # - "persistentVolumes/biostudies.yaml"
- # - "ingress/gpu_redirect.yaml"
-
metadata:
organization: EBI
maintainer: Craig Russell (ctr26@ebi.ac.uk)
@@ -30,14 +5,14 @@ metadata:
helmRepos:
jupyterhub: "https://jupyterhub.github.io/helm-chart/"
- daskgateway: "https://dask.org/dask-gateway-helm-repo/"
+ # daskgateway: "https://dask.org/dask-gateway-helm-repo/"
+ dask: "https://helm.dask.org/"
jetstack: "https://charts.jetstack.io"
persistent_binderhub: "https://gesiscss.github.io/persistent_binderhub"
nginx-stable: "https://helm.nginx.com/stable"
ingress-nginx: "https://kubernetes.github.io/ingress-nginx"
trow: "https://trow.io"
twuni: "https://helm.twun.io"
- # triton: "https://helm.ngc.nvidia.com/nvidia"
triton: "https://ctr26.github.io/tritoninferenceserver/"
# prometheus-community: "https://prometheus-community.github.io/helm-charts"
# kube-state-metrics: "https://kubernetes.github.io/kube-state-metrics"
@@ -48,17 +23,8 @@ appsTemplates:
binderhub: &binderhub
name: "binderhub"
chart: "jupyterhub/binderhub"
- version: "0.2.0-n645.h06139f0"
+ version: "1.0.0-0.dev.git.3035.h32213a4"
priority: -8
- # version: "0.2.0-n575.h2dc091f"
- # version: "0.2.0-n467.hbc700d4"
- # version: "0.2.0-n519.hf5c1a19"
- # version: "0.2.0-n217.h35366ea"
- # version: "0.2.0-n217.h35366ea"
- # helmFlags:
- # - "--debug"
- # # test: true
- # # protected: true
wait: false # Lethal for binderhub
timeout: 180
enabled: false
@@ -66,90 +32,30 @@ appsTemplates:
postUpgrade: "fuser/fuser.yml"
postInstall: "fuser/fuser.yml"
set:
- # registry.username: "$CI_REGISTRY_USER"
- # registry.password: "$CI_REGISTRY_PASSWORD"
- # config.BinderHub.image_prefix: "$CI_REGISTRY_IMAGE"
jupyterhub.hub.services.binder.apiToken: "$SECRET_JUPYTERHUB_PROXY_TOKEN"
jupyterhub.proxy.secretToken: "$SECRET_BINDERHUB_SERVICE"
- config.GitHubRepoProvider.access_token: "$SECRET_GITHUBREPOPROVIDER"
- # jupyterhub.auth.github.clientId: "$SECRET_GITHUB_CLIENTID"
- # jupyterhub.auth.github.clientSecret: "$SECRET_GITHUB_CLIENTSECRET"
- jupyterhub.hub.config.GitHubOAuthenticator.client_id: "$SECRET_GITHUB_CLIENTID"
- jupyterhub.hub.config.GitHubOAuthenticator.client_secret: "$SECRET_GITHUB_CLIENTSECRET"
- # config.BinderHub.image_prefix: "$CI_REGISTRY_IMAGE"
- # config.BinderHub.hub_url: "$HOST_NAME/binderhub/"
- # hooks:
- # preUpgrade: "binderhub/persistentVolumes/biostudies.yaml"
- # hub_url: http://10.43.114.27
- # config.DockerRegistry.url: "https://dockerhub.ebi.ac.uk"
- # config.DockerRegistry.token_url: ""
- # registry.url: "https://dockerhub.ebi.ac.uk"
- # registry.username: "$CI_REGISTRY_USER"
- # registry.password: "$CI_REGISTRY_PASSWORD"
- # config.DockerRegistry.url: https://trow.ctr26.duckdns.org
- # config.DockerRegistry.token_url: ""
- # config.DockerRegistry.token_url: "https://trow.ctr26.duckdns.org/v2/token?service="
- # config.BinderHub.image_prefix: "trow.ctr26.duckdns.org/"
- # registry.url: https://trow.ctr26.duckdns.org
- # registry.username: "bia"
- # registry.password: "***REMOVED***="
- # config.BinderHub.use_registry: true
- # config.DockerRegistry.token_url: None
- # config.DockerRegistry.token_url: "https://registry.ctr26.duckdns.org/v2/token?service="
config.DockerRegistry.token_url: ""
config.BinderHub.image_prefix: "registry.binder.bioimagearchive.org/"
registry.url: https://registry.binder.bioimagearchive.org
- # registry.username: "admin"
- # registry.password: "password"
config.BinderHub.use_registry: true
+
jupyterhub: &jupyterhub
name: "jupyterhub"
chart: "jupyterhub/jupyterhub"
version: "0.10.2-n047.hb387f987"
helmFlags:
- "--debug"
- # test: true
- # protected: true
wait: false
timeout: 1200
enabled: false
priority: -9
set:
proxy.secretToken: "$SECRET_JUPYTERHUB_PROXY_TOKEN"
- # auth.github.clientId: "$SECRET_GITHUB_CLIENTID"
- # auth.github.clientSecret: "$SECRET_GITHUB_CLIENTSECRET"
- # ingress.hosts: "binder.bioimagearchive.org"
- # hooks:
- # preUpgrade: "persistentVolumes/biostudies.yaml"
-
- persistent-binderhub: &persistent-binderhub
- name: "persistent-binderhub"
- chart: "persistent_binderhub/persistent_binderhub"
- version: "0.2.0-n563"
- wait: false # Lethal for binderhub
- timeout: 1200
- enabled: false
- priority: -11
- set:
- # binderhub.registry.username: "$CI_REGISTRY_USER"
- # binderhub.registry.password: "$CI_REGISTRY_PASSWORD"
- # binderhub.registry.url: https://registry.binder.bioimagearchive.org
- binderhub.jupyterhub.hub.services.binder.apiToken: "$SECRET_JUPYTERHUB_PROXY_TOKEN"
- binderhub.jupyterhub.proxy.secretToken: "$SECRET_BINDERHUB_SERVICE"
- # binderhub.config.BinderHub.image_prefix: "$CI_REGISTRY_IMAGE"
- # binderhub.config.BinderHub.image_prefix: "registry.alpha.binder.bioimagearchive.org/binder-"
- # binderhub.config.BinderHub.image_prefix: "registry.binder.bioimagearchive.org/"
- binderhub.config.GitHubRepoProvider.access_token: "$SECRET_GITHUBREPOPROVIDER"
- binderhub.config.DockerRegistry.token_url: ""
- binderhub.config.BinderHub.image_prefix: "registry.binder.bioimagearchive.org/"
- binderhub.registry.url: https://registry.binder.bioimagearchive.org
-
-
dask: &dask
- name: "daskgateway"
- chart: "daskgateway/dask-gateway"
- version: "0.9.0"
+ name: "dask-gateway"
+ chart: "dask/dask-gateway"
+ version: "2022.4.0"
enabled: false
priority: -10
timeout: 120
@@ -166,45 +72,19 @@ apps:
- trow/values.yaml
- trow/values.yaml
- # tritoninferenceserver:
- # name: "tritoninferenceserver"
- # namespace: "tritoninferenceserver"
- # chart: "triton/tritoninferenceserver"
- # enabled: false
- # version: "1.0.0"
- # valuesFiles:
- # - triton/values.yaml
-
nvidia-device-plugin:
name: "nvidia-device-plugin"
namespace: "nvidia-device-plugin"
chart: "nvdp/nvidia-device-plugin"
- group: "production"
+ group: "prod"
enabled: false
version: "0.10.0"
- # valuesFiles:
- # - triton/values.yaml
-
- # prometheus:
- # name: "prometheus"
- # namespace: "tritoninferenceserver"
- # chart: "prometheus-community/prometheus"
- # # chart: "prometheus/prometheus-adapter"
- # enabled: false
- # version: "14.7.1"
- # # hooks:
- # # postUpgrade:
- # # - "triton/config_map.yaml"
- # # - "triton/deployment.yaml"
- # # postInstall:
- # # - "triton/config_map.yaml"
- # # - "triton/deployment.yaml"
docker-registry:
name: "docker-registry"
namespace: "docker-registry"
chart: "twuni/docker-registry"
- group: "production"
+ group: "prod"
enabled: false
priority: -3
version: "2.1.0"
@@ -215,190 +95,118 @@ apps:
valuesFiles:
- docker-registry/values.yaml
-
- # nginx-ingress:
- # name: "nginx-ingress"
- # namespace: "nginx-ingress"
- # chart: "nginx-stable/nginx-ingress"
- # enabled: false
- # version: "3.35.0"
- # valuesFiles:
- # - ingress/nginx-ingress.yaml
-
ingress-nginx:
name: "ingress-nginx"
namespace: "ingress-nginx"
chart: "ingress-nginx/ingress-nginx"
- group: "production"
+ group: "prod"
enabled: false
- priority: 0
- version: "3.35.0"
+ priority: -100
+ version: "4.6.0"
valuesFiles:
- ingress/nginx-ingress.yaml
- # daskgateway-staging:
- # <<: *dask
- # name: "daskgateway-staging"
- # group: "staging"
- # namespace: "daskgateway-staging"
- # # valuesFiles:
- # # - "daskgateway/staging.yaml"
-
daskgateway:
<<: *dask
name: "daskgateway"
- group: "production"
+ group: "prod"
namespace: "daskgateway"
# valuesFiles:
- # - "daskgateway/production.yaml"
+ # - "daskgateway/prod.yaml"
- binderhub-staging:
+ binderhub-dev:
<<: *binderhub
- name: "binderhub-staging"
- namespace: "binderhub-staging"
- group: "staging"
+ name: "binderhub-dev"
+ namespace: "binderhub-dev"
+ group: "dev"
# protected: false
valuesFiles:
- # - "binderhub/secret.yaml"
- "binderhub/config.yaml"
- # - "binderhub/secret.yaml"
- - "binderhub/staging/config.yaml"
- # - "binderhub/ebi_dockerhub.yaml"
- # set:
- # config.BinderHub.base_url: "staging"
- # ingress.pathSuffix: "/staging"
- # registry.username: "$SECRET_DOCKERHUB_USERNAME"
- # registry.password: "$SECRET_DOCKERHUB_PASSWORD"
- # set:
- # jupyterhub.hub.baseUrl: "/binderhub/staging/"
- # config.BinderHub.hub_url: "$HOST_NAME"
+ - "binderhub/dev/config.yaml"
- binderhub-production:
+ binderhub-prod:
<<: *binderhub
- name: "binderhub-production"
- namespace: "binderhub-production"
- group: "production"
+ name: "binderhub-prod"
+ namespace: "binderhub-prod"
+ group: "prod"
valuesFiles:
- "binderhub/config.yaml"
- - "binderhub/production/config.yaml"
+ - "binderhub/prod/config.yaml"
# - "binderhub/persistentVolumes.yaml"
- persistent-binderhub-production:
- <<: *persistent-binderhub
- name: "persistent-binderhub-production"
- namespace: "persistent-binderhub-production"
- enabled: false
- group: "production"
- valuesFiles:
- - "binderhub-persistent/config.yaml"
- - "binderhub-persistent/production/config.yaml"
- # - "binderhub/persistentVolumes.yaml"
-
-
- binderhub-production-gpu:
+ binderhub-prod-gpu:
<<: *binderhub
enabled: false
- name: "binderhub-production-gpu"
- namespace: "binderhub-production-gpu"
- group: "production"
+ name: "binderhub-prod-gpu"
+ namespace: "binderhub-prod-gpu"
+ group: "prod"
# valuesFiles:
- binderhub-staging-gpu:
+ binderhub-dev-gpu:
<<: *binderhub
enabled: false
- name: "binderhub-staging-gpu"
- namespace: "binderhub-staging-gpu"
- group: "staging"
- # valuesFiles:
- # - "binderhub/config.yaml"
- # - "binderhub/production/config.yaml"
- # - "binderhub/persistentVolumes.yaml"
- # binderhub-production-gpu:
- # <<: *binderhub
- # name: "binderhub-production-gpu"
- # namespace: "binderhub-production"
- # group: "production-gpu"
- # valuesFiles:
- # - "binderhub/config.yaml"
- # - "binderhub/production/config.yaml"
- # - "binderhub/gpu_support.yaml"
+ name: "binderhub-dev-gpu"
+ namespace: "binderhub-dev-gpu"
+ group: "dev"
- jupyterhub-sandbox-staging:
+ jupyterhub-sandbox-dev:
<<: *jupyterhub
- name: "jupyterhub-sandbox-staging"
- group: "staging"
- namespace: "jupyterhub-sandbox-staging"
+ name: "jupyterhub-sandbox-dev"
+ group: "dev"
+ namespace: "jupyterhub-sandbox-dev"
# hooks:
- # preInstall: "jupyterhub/staging/volumes/biostudies.yaml"
+ # preInstall: "jupyterhub/dev/volumes/biostudies.yaml"
valuesFiles:
- "jupyterhub/config.yaml"
# - "jupyterhub/persistentVolumes.yaml"
- - "jupyterhub/staging/config.yaml"
+ - "jupyterhub/dev/config.yaml"
- "jupyterhub/sandbox.yaml"
- # - "jupyterhub/staging/sandbox.yaml"
+ # - "jupyterhub/dev/sandbox.yaml"
- jupyterhub-sandbox-production:
+ jupyterhub-sandbox-prod:
<<: *jupyterhub
- name: "jupyterhub-sandbox-production"
- group: "production"
- namespace: "jupyterhub-sandbox-production"
+ name: "jupyterhub-sandbox-prod"
+ group: "prod"
+ namespace: "jupyterhub-sandbox-prod"
# hooks:
- # preInstall: "jupyterhub/production/volumes/biostudies.yaml"
+ # preInstall: "jupyterhub/prod/volumes/biostudies.yaml"
valuesFiles:
- "jupyterhub/config.yaml"
# - "jupyterhub/persistentVolumes.yaml"
- - "jupyterhub/production/config.yaml"
+ - "jupyterhub/prod/config.yaml"
- "jupyterhub/sandbox.yaml"
- # - "jupyterhub/production/sandbox.yaml"
- # -------------------- JUNK ----------------------------------
- # jupyterhub-test:
- # valuesFiles:
- # - "values/jupyterhub-config.yaml"
- # # - "values/staging/jupyterhub-config.yaml"
- # - "values/jupyterhub-sandbox.yaml"
- # # - "values/staging/jupyterhub-sandbox.yaml"
- # name: "jupyterhub-staging"
- # namespace: "staging"
- # chart: "jupyterhub/jupyterhub"
- # # version: "0.9.0"
- # version: "0.10.2-n047.hb387f987"
- # enabled: false
- # priority: 0
- # timeout: 1200
- # # wait: true
- # set:
- # proxy.secretToken: "$SECRET_JUPYTERHUB_PROXY_TOKEN"
+ # - "jupyterhub/prod/sandbox.yaml"
- jupyterhub-github-staging:
+ jupyterhub-github-dev:
<<: *jupyterhub
- name: "jupyterhub-github-staging"
- group: "staging"
- namespace: "jupyterhub-github-staging"
+ name: "jupyterhub-github-dev"
+ group: "dev"
+ namespace: "jupyterhub-github-dev"
set:
- auth.github.callbackUrl: "staging.$HOST_NAME/github/hub/oauth_callback"
+ auth.github.callbackUrl: "dev.$HOST_NAME/github/hub/oauth_callback"
valuesFiles:
- "jupyterhub/config.yaml"
- - "jupyterhub/staging/config.yaml"
+ - "jupyterhub/dev/config.yaml"
# - "jupyterhub/persistentVolumes.yaml"
- "jupyterhub/github.yaml"
- - "jupyterhub/staging/github.yaml"
+ - "jupyterhub/dev/github.yaml"
- jupyterhub-github-production:
+ jupyterhub-github-prod:
<<: *jupyterhub
- name: "jupyterhub-github-production"
- group: "production"
- namespace: "jupyterhub-github-production"
+ name: "jupyterhub-github-prod"
+ group: "prod"
+ namespace: "jupyterhub-github-prod"
# set:
# auth.github.callbackUrl: "$HOST_NAME/github/hub/oauth_callback"
valuesFiles:
- "jupyterhub/config.yaml"
- - "jupyterhub/production/config.yaml"
+ - "jupyterhub/prod/config.yaml"
# - "jupyterhub/persistentVolumes.yaml"
- "jupyterhub/github.yaml"
- # - "jupyterhub/production/github.yaml"
+ # - "jupyterhub/prod/github.yaml"
- cert-manager-production:
+ cert-manager-prod:
name: "cert-manager"
chart: "jetstack/cert-manager"
enabled: false
@@ -406,12 +214,12 @@ apps:
# timeout: 120
version: "v1.7.2"
wait: true
- group: "production"
+ group: "prod"
namespace: "cert-manager"
set:
installCRDs: true
ingressShim.defaultIssuerKind: "ClusterIssuer"
- ingressShim.defaultIssuerName: "letsencrypt-production"
+ ingressShim.defaultIssuerName: "letsencrypt-prod"
hooks:
postUpgrade: "cert-managment/cluster_issuer.yaml"
postInstall: "cert-managment/cluster_issuer.yaml"
@@ -422,37 +230,4 @@ apps:
chart: "nvidia/gpu-operator"
enabled: false
version: "1.9.1"
- group: "production"
-
-# -------------------- JUNK ----------------------------------
-# jupyterhub-test:
-# valuesFiles:
-# - "values/jupyterhub-config.yaml"
-# # - "values/staging/jupyterhub-config.yaml"
-# - "values/jupyterhub-sandbox.yaml"
-# # - "values/staging/jupyterhub-sandbox.yaml"
-# name: "jupyterhub-staging"
-# namespace: "staging"
-# chart: "jupyterhub/jupyterhub"
-# # version: "0.9.0"
-# version: "0.10.2-n047.hb387f987"
-# enabled: false
-# priority: 0
-# timeout: 1200
-# # wait: true
-# set:
-# proxy.secretToken: "$SECRET_JUPYTERHUB_PROXY_TOKEN"
-
-# binderhub:
-# valuesFiles:
-# - "binderhub/secret.yaml"
-# - "binderhub/config.yaml"
-# - "binderhub/production.yaml"
-# name: "binderhub"
-# namespace: "staging"
-# protected: true
-# chart: "jupyterhub/binderhub"
-# version: "0.2.0-n361.h6f57706"
-# enabled: false
-# priority: 0
-# # set:
+ group: "prod"
diff --git a/helmsman/binder.yaml b/helmsman/binder.yaml
index 33c4df7..567e990 100644
--- a/helmsman/binder.yaml
+++ b/helmsman/binder.yaml
@@ -1,44 +1,18 @@
settings:
- kubeContext: bia-binder
+ kubeContext: gpu
apps:
- binderhub-production:
+ binderhub-prod:
enabled: true
valuesFiles:
- "../binderhub/config.yaml"
- - "../binderhub/production/config.yaml"
- # binderhub-production-gpu:
- # enabled: false
- # trow:
- # enabled: false
- # nvidia-device-plugin:
- # enabled: false
+ - "../binderhub/prod/config.yaml"
docker-registry:
enabled: true
ingress-nginx:
enabled: true
- # daskgateway-production:
- # enabled: false
- # binderhub-staging:
- # enabled: false
- # binderhub-production:
- # enabled: false
- persistent-binderhub-production:
+ jupyterhub-sandbox-prod:
+ enabled: false
+ jupyterhub-sandbox-dev:
+ enabled: false
+ cert-manager-prod:
enabled: true
- # binderhub-production-gpu:
- # enabled: false
- # binderhub-staging-gpu:
- # enabled: false
- jupyterhub-sandbox-production:
- enabled: true
- jupyterhub-sandbox-staging:
- enabled: true
- # jupyterhub-sandbox-production:
- # enabled: false
- # jupyterhub-github-staging:
- # enabled: false
- # jupyterhub-github-production:
- # enabled: false
- cert-manager-production:
- enabled: true
- # nvidia-gpu-operator:
- # enabled: false
diff --git a/ingress/gpu_redirect.yaml b/ingress/gpu_redirect.yaml
index 3a175d9..782564c 100644
--- a/ingress/gpu_redirect.yaml
+++ b/ingress/gpu_redirect.yaml
@@ -16,9 +16,9 @@ metadata:
# nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
# certmanager.k8s.io/issuer: "letsencrypt-prod"
# certmanager.k8s.io/acme-challenge-type: http01
- certmanager.k8s.io/cluster-issuer: letsencrypt-production
+ certmanager.k8s.io/cluster-issuer: letsencrypt-prod
cert-manager.k8s.io/acme-challenge-type: http01
- # cert-manager.io/cluster-issuer: letsencrypt-production
+ # cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
tls:
- hosts:
diff --git a/jupyterhub/config.yaml b/jupyterhub/config.yaml
index a0e77e9..922e914 100644
--- a/jupyterhub/config.yaml
+++ b/jupyterhub/config.yaml
@@ -1,8 +1,8 @@
-# Configuration for JupyterHub staging deployment
-# Overridden in production deployments
+# Configuration for JupyterHub dev deployment
+# Overridden in prod deployments
hub:
- # baseUrl: /staging/
+ # baseUrl: /dev/
db:
type: sqlite-memory
networkPolicy:
@@ -11,7 +11,19 @@ hub:
# This may help to avoid dangling servers
# extraConfig: |
# c.JupyterHub.cleanup_servers = True
-
+ extraConfig:
+ fuseConfig: |
+ from kubernetes import client
+ def modify_pod_hook(spawner, pod):
+ pod.spec.containers[0].security_context = client.V1SecurityContext(
+ privileged=True,
+ run_as_user=0,
+ capabilities=client.V1Capabilities(
+ add=['SYS_ADMIN']
+ )
+ )
+ return pod
+ c.KubeSpawner.modify_pod_hook = modify_pod_hook
# auth:
# type: tmp
@@ -60,7 +72,7 @@ singleuser:
IDR_HOST: idr.openmicroscopy.org
IDR_USER: public
JUPYTER_ENABLE_LAB: "true"
- cmd: "/usr/local/bin/start-singleuser.sh"
+ cmd: "start-singleuser.sh"
prePuller:
hook:
diff --git a/jupyterhub/github.yaml b/jupyterhub/github.yaml
index a0e2f1e..af2cf21 100644
--- a/jupyterhub/github.yaml
+++ b/jupyterhub/github.yaml
@@ -52,17 +52,22 @@ cull:
every: 600
singleuser:
+ cmd: "start-singleuser.sh"
lifecycleHooks:
postStart:
exec:
- command: ["sh","-c","mamba install nb_conda_kernels --yes"]
+ command:
+ - "sh"
+ - "-c"
+ - >
+ sudo apt update && sudo apt-get -y install fuse libfuse-dev libfuse2 coreutils curl rclone && mamba install nb_conda_kernels --yes
extraEnv:
GRANT_SUDO: "yes"
NOTEBOOK_ARGS: "--allow-root"
uid: 0
defaultUrl: "/lab"
storage:
- capacity: 32G
+ capacity: 128G
extraVolumes:
- name: shm-volume
emptyDir:
diff --git a/jupyterhub/staging/config.yaml b/jupyterhub/staging/config.yaml
index 8167899..2fc0ec3 100644
--- a/jupyterhub/staging/config.yaml
+++ b/jupyterhub/staging/config.yaml
@@ -1,4 +1,4 @@
ingress:
enabled: true
hosts:
- - "staging.binder.bioimagearchive.org"
\ No newline at end of file
+ - "dev.binder.bioimagearchive.org"
\ No newline at end of file
diff --git a/minikube.yaml b/minikube.yaml
index 8763e3c..97d7c2c 100644
--- a/minikube.yaml
+++ b/minikube.yaml
@@ -1,6 +1,6 @@
# Minikube values
-jupyterhub_staging_overrides:
+jupyterhub_dev_overrides:
- values/minikube/jupyterhub-config.yaml
jupyterhub_training_overrides:
diff --git a/persistentVolumes/biostudies.yaml b/persistentVolumes/biostudies.yaml
index 7311eb6..f733bd6 100644
--- a/persistentVolumes/biostudies.yaml
+++ b/persistentVolumes/biostudies.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolume
metadata:
- name: biostudies-nfs
+ name: biostudies-nfs-binder
spec:
storageClassName: nfs
capacity:
@@ -25,7 +25,7 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: biostudies-nfs-pvc
+ name: biostudies-nfs-pvc-binder
spec:
storageClassName: nfs
accessModes:
@@ -33,4 +33,4 @@ spec:
resources:
requests:
storage: 1Gi
- volumeName: biostudies-nfs
+ volumeName: biostudies-nfs-binder
diff --git a/tests/conftest.py b/tests/conftest.py
deleted file mode 100644
index a32189b..0000000
--- a/tests/conftest.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-def pytest_addoption(parser):
- parser.addoption(
- "--binder-url",
- help="Fully qualified URL to the binder installation"
- )
-
- parser.addoption(
- "--hub-url",
- help="Fully qualified URL to the hub installation"
- )
-
-
-@pytest.fixture
-def binder_url(request):
- return request.config.getoption("--binder-url").rstrip("/")
-
-
-@pytest.fixture
-def hub_url(request):
- return request.config.getoption("--hub-url").rstrip("/")
diff --git a/tests/test_build.py b/tests/test_build.py
deleted file mode 100644
index 9888311..0000000
--- a/tests/test_build.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from contextlib import contextmanager
-import json
-import subprocess
-import tempfile
-import time
-import os
-import sys
-
-import pytest
-import requests
-
-
-@contextmanager
-def push_dummy_gh_branch(repo, branch, keyfile):
- """
- Makes a dummy commit on a given github repo as a given branch
-
- Requires that the branch not exist. keyfile should be an absolute path.
-
- Should be used as a contextmanager, it will delete the branch & the
- clone directory when done.
- """
-
- git_env = {'GIT_SSH_COMMAND': f"ssh -i {keyfile}"}
-
- with tempfile.TemporaryDirectory() as gitdir:
- subprocess.check_call(['git', 'clone', repo, gitdir], env=git_env)
- branchfile = os.path.join(gitdir, 'branchname')
- with open(branchfile, 'w') as f:
- f.write(branch)
- subprocess.check_call(['git', 'add', branchfile], cwd=gitdir)
- subprocess.check_call(['git', 'commit', '-m', f'Dummy update for {branch}'], cwd=gitdir)
- subprocess.check_call(
- ['git', 'push', 'origin', f'HEAD:{branch}'],
- env=git_env,
- cwd=gitdir,
- )
-
- try:
- yield
- finally:
- # Delete the branch so we don't clutter!
- subprocess.check_call(
- ['git', 'push', 'origin', f':{branch}'],
- env=git_env,
- cwd=gitdir,
- )
-
-
-
-@pytest.mark.timeout(498)
-def test_build_binder(binder_url):
- """
- We can launch an image that we know hasn't been built
- """
- branch = str(time.time())
- repo = 'binderhub-ci-repos/cached-minimal-dockerfile'
-
- with push_dummy_gh_branch(
- f"git@github.com:/{repo}.git",
- branch,
- os.path.abspath("secrets/binderhub-ci-repos-key"),
- ):
- build_url = binder_url + f"/build/gh/{repo}/{branch}"
- print(f"building {build_url}")
- r = requests.get(build_url, stream=True)
- r.raise_for_status()
- for line in r.iter_lines():
- line = line.decode('utf8')
- if line.startswith('data:'):
- data = json.loads(line.split(':', 1)[1])
- # include message output for debugging
- if data.get('message'):
- sys.stdout.write(data['message'])
- if data.get('phase') == 'ready':
- notebook_url = data['url']
- token = data['token']
- break
- else:
- # This means we never got a 'Ready'!
- assert False
-
- headers = {
- 'Authorization': f'token {token}'
- }
- r = requests.get(notebook_url + '/api', headers=headers)
- assert r.status_code == 200
- assert 'version' in r.json()
-
- r = requests.post(notebook_url + '/api/shutdown', headers=headers)
- assert r.status_code == 200
diff --git a/tests/test_http.py b/tests/test_http.py
deleted file mode 100644
index 291190b..0000000
--- a/tests/test_http.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Basic HTTP tests to make sure things are running"""
-import pprint
-
-import pytest
-import requests
-
-
-def test_binder_up(binder_url):
- """
- Binder Hub URL is up & returning sensible text
- """
- resp = requests.get(binder_url)
- assert resp.status_code == 200
- assert 'GitHub' in resp.text
-
-
-def test_hub_health(hub_url):
- """check JupyterHubHub health endpoint"""
- resp = requests.get(hub_url + "/hub/health")
- print(resp.text)
- assert resp.status_code == 200
-
-
-def test_binder_health(binder_url):
- """check BinderHub health endpoint"""
- resp = requests.get(binder_url + "/health")
- pprint.pprint(resp.json())
- assert resp.status_code == 200
-
-
-# the proxy-patches pod can take up to 30 seconds
-# to register its route after a proxy restart
-@pytest.mark.flaky(reruns=3, reruns_delay=10)
-def test_hub_user_redirect(hub_url):
- """Requesting a Hub URL for a non-running user"""
- # this should *not* redirect for now,
- resp = requests.get(hub_url + "/user/doesntexist")
- assert resp.status_code == 404
diff --git a/tests/test_launch.py b/tests/test_launch.py
deleted file mode 100644
index 3424c20..0000000
--- a/tests/test_launch.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import json
-import pytest
-import requests
-
-@pytest.mark.timeout(497)
-def test_launch_binder(binder_url):
- """
- We can launch an image that most likely already has been built.
- """
- # Known good version of this repo
- repo = 'binder-examples/requirements'
- ref = 'fa84f12'
- build_url = binder_url + '/build/gh/{repo}/{ref}'.format(repo=repo, ref=ref)
- r = requests.get(build_url, stream=True)
- r.raise_for_status()
- for line in r.iter_lines():
- line = line.decode('utf8')
- if line.startswith('data:'):
- data = json.loads(line.split(':', 1)[1])
- if data.get('phase') == 'ready':
- notebook_url = data['url']
- token = data['token']
- break
- else:
- # This means we never got a 'Ready'!
- assert False
-
- headers = {
- 'Authorization': 'token {}'.format(token)
- }
- r = requests.get(notebook_url + '/api', headers=headers)
- assert r.status_code == 200
- assert 'version' in r.json()
-
- r = requests.post(notebook_url + '/api/shutdown', headers=headers)
- assert r.status_code == 200
diff --git a/trow/values.yaml b/trow/values.yaml
index 7b50fe7..32692e5 100644
--- a/trow/values.yaml
+++ b/trow/values.yaml
@@ -5,7 +5,7 @@ trow:
ingress:
enabled: true
annotations:
- cert-manager.io/cluster-issuer: letsencrypt-production
+ cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/proxy-body-size: "0"
tls:
- hosts: