From 6808a61dd4807df0003ec7ff611555e62c660558 Mon Sep 17 00:00:00 2001 From: "Jose A. Rivera" Date: Fri, 14 Sep 2018 17:06:03 -0500 Subject: [PATCH] Split controller and node binaries Signed-off-by: Jose A. Rivera --- .gitignore | 3 + build.sh | 35 +- cmd/glusterfs-controller/main.go | 35 ++ cmd/glusterfs-node/main.go | 35 ++ cmd/glusterfs/main.go | 62 --- examples/kubernetes/csi-deployment.yaml | 308 ----------- examples/kubernetes/csi-glusterfs.yaml | 102 ++++ examples/kubernetes/csi-node.yaml | 92 ++++ examples/kubernetes/csi-rbac.yaml | 79 +++ {pkg/glusterfs => extras}/Dockerfile | 26 +- pkg/client/client.go | 19 + pkg/client/error.go | 53 ++ pkg/client/util.go | 109 ++++ pkg/command/command.go | 93 ++++ pkg/controller/controllerserver.go | 434 +++++++++++++++ pkg/controller/util.go | 48 ++ pkg/glusterfs/client.go | 343 ++++++++++++ pkg/glusterfs/controllerserver.go | 702 ------------------------ pkg/glusterfs/driver.go | 75 +-- pkg/glusterfs/driver_test.go | 330 ----------- pkg/glusterfs/identityserver.go | 45 -- pkg/glusterfs/nodeserver.go | 170 ------ pkg/glusterfs/utils/config.go | 36 -- pkg/identity/identityserver.go | 51 ++ pkg/node/driver.go | 33 ++ pkg/node/nodeserver.go | 198 +++++++ scripts/build-drivers.sh | 19 + 27 files changed, 1802 insertions(+), 1733 deletions(-) create mode 100644 cmd/glusterfs-controller/main.go create mode 100644 cmd/glusterfs-node/main.go delete mode 100644 cmd/glusterfs/main.go delete mode 100644 examples/kubernetes/csi-deployment.yaml create mode 100644 examples/kubernetes/csi-glusterfs.yaml create mode 100644 examples/kubernetes/csi-node.yaml create mode 100644 examples/kubernetes/csi-rbac.yaml rename {pkg/glusterfs => extras}/Dockerfile (75%) create mode 100644 pkg/client/client.go create mode 100644 pkg/client/error.go create mode 100644 pkg/client/util.go create mode 100644 pkg/command/command.go create mode 100644 pkg/controller/controllerserver.go create mode 100644 pkg/controller/util.go create mode 100644 pkg/glusterfs/client.go delete mode 100644 pkg/glusterfs/controllerserver.go delete mode 100644 pkg/glusterfs/driver_test.go delete mode 100644 pkg/glusterfs/identityserver.go delete mode 100644 pkg/glusterfs/nodeserver.go delete mode 100644 pkg/glusterfs/utils/config.go create mode 100644 pkg/identity/identityserver.go create mode 100644 pkg/node/driver.go create mode 100644 pkg/node/nodeserver.go create mode 100755 scripts/build-drivers.sh diff --git a/.gitignore b/.gitignore index 8704c2517..bc00a0089 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# local build dir +build + # go coverage data profile.cov diff --git a/build.sh b/build.sh index adf93ec93..5a3d8f58d 100755 --- a/build.sh +++ b/build.sh @@ -2,8 +2,8 @@ set -e -# Set driver name -DRIVER="${DRIVER:-glusterfs-csi-driver}" +# Set which drivers to build +DRIVERS="${DRIVERS:-glusterfs-controller glusterfs-node}" # Set which docker repo to tag REPO="${REPO:-gluster/}" @@ -38,17 +38,20 @@ build_args+=( --build-arg "builddate=$BUILDDATE" ) echo "=== $RUNTIME_CMD version ===" $RUNTIME_CMD version -#-- Build container -$RUNTIME_CMD $build \ - -t "${REPO}${DRIVER}" \ - "${build_args[@]}" \ - -f pkg/glusterfs/Dockerfile \ - . 
\ -|| exit 1 - -# If running tests, extract profile data -if [ "$RUN_TESTS" -ne 0 ]; then - rm -f profile.cov - $RUNTIME_CMD run --entrypoint cat "${REPO}${DRIVER}" \ - /profile.cov > profile.cov -fi +#-- Build containers +for driver in ${DRIVERS}; do + $RUNTIME_CMD $build \ + -t "${REPO}${driver}-csi-driver" \ + --build-arg DRIVER="$driver" \ + "${build_args[@]}" \ + -f extras/Dockerfile \ + . \ + || exit 1 + + # If running tests, extract profile data + if [ "$RUN_TESTS" -ne 0 ]; then + rm -f profile.cov + $RUNTIME_CMD run --entrypoint cat "${REPO}${driver}-csi-driver" \ + /profile.cov > profile.cov + fi +done diff --git a/cmd/glusterfs-controller/main.go b/cmd/glusterfs-controller/main.go new file mode 100644 index 000000000..7354299b8 --- /dev/null +++ b/cmd/glusterfs-controller/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "os" + + "github.com/gluster/gluster-csi-driver/pkg/command" + "github.com/gluster/gluster-csi-driver/pkg/glusterfs" +) + +// Driver Identifiers +const ( + cmdName = "glusterfs-controller-driver" + CSIDriverDesc = "GlusterFS (glusterd2) CSI Controller Driver" + CSIDriverName = "org.gluster.glusterfs" + CSIDriverVersion = "0.0.9" +) + +func init() { + command.Init() +} + +func main() { + var config = command.NewConfig(cmdName, CSIDriverName, CSIDriverVersion, CSIDriverDesc) + + d := glusterfs.New(config) + if d == nil { + fmt.Println("Failed to initialize GlusterFS CSI driver") + os.Exit(1) + } + + cmd := command.InitCommand(config, d) + + command.Run(config, cmd) +} diff --git a/cmd/glusterfs-node/main.go b/cmd/glusterfs-node/main.go new file mode 100644 index 000000000..dc7a33066 --- /dev/null +++ b/cmd/glusterfs-node/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "os" + + "github.com/gluster/gluster-csi-driver/pkg/command" + "github.com/gluster/gluster-csi-driver/pkg/node" +) + +// Driver Identifiers +const ( + cmdName = "glusterfs-node-driver" + CSIDriverDesc = "GlusterFS (glusterd2) CSI Node Driver" + CSIDriverName = "org.gluster.glusterfs" + CSIDriverVersion = "0.0.9" +) + +func init() { + command.Init() +} + +func main() { + var config = command.NewConfig(cmdName, CSIDriverName, CSIDriverVersion, CSIDriverDesc) + + d := node.New(config) + if d == nil { + fmt.Println("Failed to initialize GlusterFS CSI driver") + os.Exit(1) + } + + cmd := command.InitCommand(config, d) + + command.Run(config, cmd) +} diff --git a/cmd/glusterfs/main.go b/cmd/glusterfs/main.go deleted file mode 100644 index cf9906406..000000000 --- a/cmd/glusterfs/main.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - gfd "github.com/gluster/gluster-csi-driver/pkg/glusterfs" - "github.com/gluster/gluster-csi-driver/pkg/glusterfs/utils" - - "github.com/spf13/cobra" -) - -func init() { - // #nosec - _ = flag.Set("logtostderr", "true") -} - -func main() { - // #nosec - _ = flag.CommandLine.Parse([]string{}) - var config = utils.NewConfig() - - cmd := &cobra.Command{ - Use: "glusterfs-csi-driver", - Short: "GlusterFS CSI driver", - Run: func(cmd *cobra.Command, args []string) { - handle(config) - }, - } - - cmd.Flags().AddGoFlagSet(flag.CommandLine) - - cmd.PersistentFlags().StringVar(&config.NodeID, "nodeid", "", "CSI node id") - // #nosec - _ = cmd.MarkPersistentFlagRequired("nodeid") - - cmd.PersistentFlags().StringVar(&config.Endpoint, "endpoint", "", "CSI endpoint") - - cmd.PersistentFlags().StringVar(&config.RestURL, "resturl", "", "glusterd2 rest endpoint") - - cmd.PersistentFlags().StringVar(&config.RestUser, "username", 
"glustercli", "glusterd2 user name") - - cmd.PersistentFlags().StringVar(&config.RestSecret, "restsecret", "", "glusterd2 rest user secret") - - if err := cmd.Execute(); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "%s", err.Error()) - os.Exit(1) - } -} - -func handle(config *utils.Config) { - if config.Endpoint == "" { - config.Endpoint = os.Getenv("CSI_ENDPOINT") - } - d := gfd.New(config) - if d == nil { - fmt.Println("Failed to initialize GlusterFS CSI driver") - os.Exit(1) - } - d.Run() -} diff --git a/examples/kubernetes/csi-deployment.yaml b/examples/kubernetes/csi-deployment.yaml deleted file mode 100644 index 9785f43e6..000000000 --- a/examples/kubernetes/csi-deployment.yaml +++ /dev/null @@ -1,308 +0,0 @@ -# Copyright 2018 The Gluster CSI Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- - -kind: Service -apiVersion: v1 -metadata: - name: csi-attacher-glusterfsplugin - labels: - app: csi-attacher-glusterfsplugin -spec: - selector: - app: csi-attacher-glusterfsplugin - ports: - - name: dummy - port: 12345 - ---- -kind: StatefulSet -apiVersion: apps/v1beta1 -metadata: - name: csi-attacher-glusterfsplugin -spec: - serviceName: "csi-attacher" - replicas: 1 - template: - metadata: - labels: - app: csi-attacher-glusterfsplugin - spec: - serviceAccount: glusterfs-csi - containers: - - name: csi-attacher - image: quay.io/k8scsi/csi-attacher:v0.4.0 - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - - name: glusterfs - image: docker.io/gluster/glusterfs-csi-driver - args: - - "--nodeid=$(NODE_ID)" - - "--endpoint=$(CSI_ENDPOINT)" - - "--resturl=$(REST_URL)" - - "--restsecret=$(REST_SECRET)" - env: - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix://plugin/csi.sock - - name: REST_URL - value: http://192.168.121.182:24007 - - name: REST_SECRET - value: b03045b7988258557ecd3e136cd37ba3f928ea0831b3b1b7ed8ae238d36d9071 - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /plugin - volumes: - - name: socket-dir - emptyDir: - ---- -kind: DaemonSet -apiVersion: apps/v1beta2 -metadata: - name: csi-nodeplugin-glusterfsplugin -spec: - selector: - matchLabels: - app: csi-nodeplugin-glusterfsplugin - template: - metadata: - labels: - app: csi-nodeplugin-glusterfsplugin - spec: - serviceAccount: glusterfs-csi - hostNetwork: true - containers: - - name: driver-registrar - image: quay.io/k8scsi/driver-registrar:v0.4.0 - args: - - "--v=5" - - "--csi-address=$(ADDRESS)" - - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" - env: - - name: ADDRESS - value: /plugin/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/org.gluster.glusterfs/csi.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /plugin - - name: 
registration-dir - mountPath: /registration - - name: glusterfs - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - image: docker.io/gluster/glusterfs-csi-driver - args: - - "--nodeid=$(NODE_ID)" - - "--endpoint=$(CSI_ENDPOINT)" - - "--resturl=$(REST_URL)" - - "--restsecret=$(REST_SECRET)" - env: - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix://plugin/csi.sock - - name: REST_URL - value: http://192.168.121.182:24007 - - name: REST_SECRET - value: b03045b7988258557ecd3e136cd37ba3f928ea0831b3b1b7ed8ae238d36d9071 - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: plugin-dir - mountPath: /plugin - - name: pods-mount-dir - mountPath: /var/lib/kubelet/pods - mountPropagation: "Bidirectional" - volumes: - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/org.gluster.glusterfs - type: DirectoryOrCreate - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins/ - type: Directory - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet/pods - type: Directory - ---- -kind: Service -apiVersion: v1 -metadata: - name: csi-provisioner-glusterfsplugin - labels: - app: csi-provisioner-glusterfsplugin -spec: - selector: - app: csi-provisioner-glusterfsplugin - ports: - - name: dummy - port: 12345 - ---- -kind: StatefulSet -apiVersion: apps/v1beta1 -metadata: - name: csi-provisioner-glusterfsplugin -spec: - serviceName: "csi-provisioner-glusterfsplugin" - replicas: 1 - template: - metadata: - labels: - app: csi-provisioner-glusterfsplugin - spec: - serviceAccount: glusterfs-csi - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v0.4.0 - args: - - "--provisioner=org.gluster.glusterfs" - - "--csi-address=$(ADDRESS)" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - imagePullPolicy: "IfNotPresent" - - name: csi-snapshotter - image: quay.io/k8scsi/csi-snapshotter:v0.4.0 - args: - - "--csi-address=$(ADDRESS)" - - "--connection-timeout=15s" - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - - name: glusterfs - image: docker.io/gluster/glusterfs-csi-driver - args: - - "--nodeid=$(NODE_ID)" - - "--endpoint=$(CSI_ENDPOINT)" - - "--resturl=$(REST_URL)" - - "--restsecret=$(REST_SECRET)" - env: - - name: NODE_ID - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CSI_ENDPOINT - value: unix://plugin/csi.sock - - name: REST_URL - value: http://192.168.121.182:24007 - - name: REST_SECRET - value: b03045b7988258557ecd3e136cd37ba3f928ea0831b3b1b7ed8ae238d36d9071 - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: socket-dir - mountPath: /plugin - volumes: - - name: socket-dir - emptyDir: - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: glusterfs-csi - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: glusterfs-csi -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create", "delete"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - 
apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch", "delete", "get"] - - apiGroups: ["csi.storage.k8s.io"] - resources: ["csinodeinfos"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "list", "watch", "delete"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: glusterfs-csi-role -subjects: - - kind: ServiceAccount - name: glusterfs-csi - namespace: default -roleRef: - kind: ClusterRole - name: glusterfs-csi - apiGroup: rbac.authorization.k8s.io diff --git a/examples/kubernetes/csi-glusterfs.yaml b/examples/kubernetes/csi-glusterfs.yaml new file mode 100644 index 000000000..2ca4af8bc --- /dev/null +++ b/examples/kubernetes/csi-glusterfs.yaml @@ -0,0 +1,102 @@ +# Copyright 2018 The Gluster CSI Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- +kind: Service +apiVersion: v1 +metadata: + name: csi-controller-glusterfs + labels: + app: csi-controller-glusterfs +spec: + selector: + app: csi-controller-glusterfs + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: csi-controller-glusterfs +spec: + serviceName: "csi-controller-glusterfs" + replicas: 1 + template: + metadata: + labels: + app: csi-controller-glusterfs + spec: + serviceAccount: glusterfs-csi + containers: + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v0.4.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v0.4.0 + args: + - "--provisioner=org.gluster.glusterfs" + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-snapshotter + image: quay.io/k8scsi/csi-snapshotter:v0.4.0 + args: + - "--csi-address=$(ADDRESS)" + - "--connection-timeout=15s" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: glusterfs + image: gluster/glusterfs-controller-csi-driver + args: + - "--v=5" + env: + - name: CSI_NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix://var/lib/csi/sockets/pluginproxy/csi.sock + - name: REST_URL + value: http://192.168.121.182:24007 + - name: REST_SECRET + value: b03045b7988258557ecd3e136cd37ba3f928ea0831b3b1b7ed8ae238d36d9071 + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + volumes: + - name: socket-dir + emptyDir: diff --git a/examples/kubernetes/csi-node.yaml b/examples/kubernetes/csi-node.yaml new file mode 100644 index 000000000..b56c5e4ee --- /dev/null +++ b/examples/kubernetes/csi-node.yaml @@ -0,0 +1,92 @@ +# Copyright 2018 The Gluster CSI Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
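+
+# This manifest deploys the per-node CSI components: a DaemonSet that runs
+# the driver-registrar sidecar next to the glusterfs-node driver. The driver
+# container is privileged and uses bidirectional mount propagation so that
+# the mounts it creates appear in the kubelet pod directories on the host.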
+ +--- +kind: DaemonSet +apiVersion: apps/v1beta2 +metadata: + name: csi-nodeplugin-glusterfs +spec: + selector: + matchLabels: + app: csi-nodeplugin-glusterfs + template: + metadata: + labels: + app: csi-nodeplugin-glusterfs + spec: + serviceAccount: glusterfs-csi + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: driver-registrar-gd2 + image: quay.io/k8scsi/driver-registrar:v0.4.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /plugin/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/org.gluster.glusterfs/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /plugin + - name: registration-dir + mountPath: /registration + - name: glusterfs + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + image: gluster/glusterfs-node-csi-driver + args: + - "--v=5" + env: + - name: CSI_NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix://plugin/csi.sock + - name: REST_URL + value: http://192.168.121.182:24007 + - name: REST_SECRET + value: b03045b7988258557ecd3e136cd37ba3f928ea0831b3b1b7ed8ae238d36d9071 + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: plugin-dir + mountPath: /plugin + - name: pods-mount-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: "Bidirectional" + volumes: + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/org.gluster.glusterfs + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins/ + type: Directory + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory diff --git a/examples/kubernetes/csi-rbac.yaml b/examples/kubernetes/csi-rbac.yaml new file mode 100644 index 000000000..eb07a9876 --- /dev/null +++ b/examples/kubernetes/csi-rbac.yaml @@ -0,0 +1,79 @@ +# Copyright 2018 The Gluster CSI Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
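+
+# RBAC shared by the controller and node deployments: a glusterfs-csi
+# ServiceAccount bound to a ClusterRole granting the CSI sidecars access to
+# PersistentVolumes, PersistentVolumeClaims, VolumeAttachments, snapshot
+# resources, and related objects.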
+ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: glusterfs-csi + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: glusterfs-csi +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "create", "patch", "delete"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch", "delete", "get"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: glusterfs-csi-role +subjects: + - kind: ServiceAccount + name: glusterfs-csi + namespace: default +roleRef: + kind: ClusterRole + name: glusterfs-csi + apiGroup: rbac.authorization.k8s.io diff --git a/pkg/glusterfs/Dockerfile b/extras/Dockerfile similarity index 75% rename from pkg/glusterfs/Dockerfile rename to extras/Dockerfile index 0016290f3..4fb87132c 100644 --- a/pkg/glusterfs/Dockerfile +++ b/extras/Dockerfile @@ -50,24 +50,26 @@ RUN [ $RUN_TESTS -eq 0 ] || ${SCRIPTSDIR}/test-go.sh #-- Build phase -# Build executable -RUN CGO_ENABLED=0 GOOS=linux go build -ldflags '-extldflags "-static"' -o /build/glusterfs-csi-driver cmd/glusterfs/main.go +ARG DRIVER +ARG BUILDDIR=/ -# Ensure the binary is statically linked -RUN ldd /build/glusterfs-csi-driver | grep -q "not a dynamic executable" +RUN ./scripts/build-drivers.sh ${DRIVER} #-- Final container FROM docker.io/centos:7.5.1804 as final +ARG DRIVER -# Install dependencies -RUN yum -y install centos-release-gluster && \ +# Install dependencies for node drivers +RUN [[ "${DRIVER}" != *node* ]] || ( \ + yum -y install centos-release-gluster && \ yum -y install glusterfs-fuse && \ yum clean all -y && \ rm -rf /var/cache/yum && \ - rpm -qa | grep gluster | tee /gluster-rpm-versions.txt + rpm -qa | grep gluster | tee /gluster-rpm-versions.txt \ + ) -# Copy glusterfs-csi-driver from build phase +# Copy driver from build phase COPY --from=build /build / # The version of the driver (git describe --dirty --always --tags | sed 's/-/./2' | sed 's/-/./2') @@ -76,12 +78,12 @@ ARG version="(unknown)" ARG builddate="(unknown)" LABEL build-date="${builddate}" -LABEL io.k8s.description="FUSE-based CSI driver for Gluster file access" -LABEL name="glusterfs-csi-driver" -LABEL Summary="FUSE-based CSI driver for Gluster file access" +LABEL io.k8s.description="${DRIVER} CSI driver for 
Gluster file access" +LABEL name="${DRIVER}-csi-driver" +LABEL Summary="${DRIVER} CSI driver for Gluster file access" LABEL vcs-type="git" LABEL vcs-url="https://github.com/gluster/gluster-csi-driver" LABEL vendor="gluster.org" LABEL version="${version}" -ENTRYPOINT ["/glusterfs-csi-driver"] +ENTRYPOINT ["/${DRIVER}-csi-driver"] diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 000000000..8d8030b09 --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,19 @@ +package client + +import ( + csi "github.com/container-storage-interface/spec/lib/go/csi/v0" +) + +// GlusterClient is an interface to clients for different Gluster server types +type GlusterClient interface { + GetClusterNodes(string) ([]string, error) + CheckExistingVolume(string, int64) error + CreateVolume(string, int64, map[string]string) error + DeleteVolume(string) error + ListVolumes() ([]*csi.Volume, error) + CheckExistingSnapshot(string, string) (*csi.Snapshot, error) + CreateSnapshot(string, string) (*csi.Snapshot, error) + CloneSnapshot(string, string) error + DeleteSnapshot(string) error + ListSnapshots(string, string) ([]*csi.Snapshot, error) +} diff --git a/pkg/client/error.go b/pkg/client/error.go new file mode 100644 index 000000000..4af83636d --- /dev/null +++ b/pkg/client/error.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" +) + +var errNotFoundStr = "not found" +var errExistsStr = "already exists with a different" + +type clientErr struct { + errStr string + vars []string +} + +func (f clientErr) Error() string { + errFmt := "" + if f.errStr == errNotFoundStr { + errFmt = fmt.Sprintf("%s %s %s", f.vars[0], f.vars[1], f.errStr) + } else if f.errStr == errExistsStr { + errFmt = fmt.Sprintf("%s %s %s %s, %s", f.vars[0], f.vars[1], f.errStr, f.vars[2], f.vars[3]) + } + + return errFmt +} + +func newClientErr(errStr string, vars ...string) clientErr { + return clientErr{errStr, vars} +} + +func matchClientErr(err error, errStr string) bool { + cliErr, ok := err.(clientErr) + return ok && cliErr.errStr == errStr +} + +// ErrNotFound creates a new "not found" error +func ErrNotFound(kind, name string) error { + return newClientErr(errNotFoundStr, kind, name) +} + +// ErrExists creates a new "different owner" error +func ErrExists(kind, name, property, propVal string) error { + return newClientErr(errExistsStr, kind, name, property, propVal) +} + +// IsErrNotFound checks for an ErrNotFound +func IsErrNotFound(err error) bool { + return matchClientErr(err, errNotFoundStr) +} + +// IsErrExists checks for an ErrExists +func IsErrExists(err error) bool { + return matchClientErr(err, errExistsStr) +} diff --git a/pkg/client/util.go b/pkg/client/util.go new file mode 100644 index 000000000..7a0bc670e --- /dev/null +++ b/pkg/client/util.go @@ -0,0 +1,109 @@ +package client + +import ( + "strconv" + + "github.com/golang/glog" +) + +// Common allocation units +const ( + KB int64 = 1000 + MB int64 = 1000 * KB + GB int64 = 1000 * MB + TB int64 = 1000 * GB + + KiB int64 = 1024 + MiB int64 = 1024 * KiB + GiB int64 = 1024 * MiB +) + +// RoundUpSize calculates how many allocation units are needed to accommodate +// a volume of given size. 
+// RoundUpSize(1500 * 1000*1000, 1000*1000*1000) returns '2'
+// (2 GB is the smallest allocatable volume that can hold 1500 MB)
+func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
+	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
+}
+
+// RoundUpToGB rounds the given quantity up to chunks of GB
+func RoundUpToGB(sizeBytes int64) int64 {
+	return RoundUpSize(sizeBytes, GB)
+}
+
+// RoundUpToMiB rounds the given quantity up to chunks of MiB
+func RoundUpToMiB(sizeBytes int64) int64 {
+	return RoundUpSize(sizeBytes, MiB)
+}
+
+// RoundUpToGiB rounds the given quantity up to chunks of GiB
+func RoundUpToGiB(sizeBytes int64) int64 {
+	return RoundUpSize(sizeBytes, GiB)
+}
+
+// SetPointerIfEmpty returns the new value if the old value is nil
+func SetPointerIfEmpty(old, new interface{}) interface{} {
+	if old == nil {
+		return new
+	}
+	return old
+}
+
+// SetStringIfEmpty returns the new string if the old string is empty
+func SetStringIfEmpty(old, new string) string {
+	if len(old) == 0 {
+		return new
+	}
+	return old
+}
+
+// ParseIntWithDefault parses a string into an int, using a default for an
+// empty or illegal string
+func ParseIntWithDefault(new string, defInt int) int {
+	newInt := defInt
+
+	if len(new) != 0 {
+		parsedInt, err := strconv.Atoi(new)
+		if err != nil {
+			glog.Errorf("bad int string [%s], using default [%d]", new, defInt)
+		} else {
+			newInt = parsedInt
+		}
+	}
+
+	return newInt
+}
+
+// ParseBoolWithDefault parses a string into a bool, using a default for an
+// empty or illegal string
+func ParseBoolWithDefault(new string, defBool bool) bool {
+	newBool := defBool
+
+	if len(new) != 0 {
+		parsedBool, err := strconv.ParseBool(new)
+		if err != nil {
+			glog.Errorf("bad bool string [%s], using default [%t]", new, defBool)
+		} else {
+			newBool = parsedBool
+		}
+	}
+
+	return newBool
+}
+
+// ParseFloatWithDefault parses a string into a float32, using a default for
+// an empty or illegal string
+func ParseFloatWithDefault(new string, defFloat float32) float32 {
+	newFloat := defFloat
+
+	if len(new) != 0 {
+		parsedFloat, err := strconv.ParseFloat(new, 32)
+		if err != nil {
+			glog.Errorf("bad float string [%s], using default [%g]", new, defFloat)
+		} else {
+			newFloat = float32(parsedFloat)
+		}
+	}
+
+	return newFloat
+}
diff --git a/pkg/command/command.go b/pkg/command/command.go
new file mode 100644
index 000000000..ec082a148
--- /dev/null
+++ b/pkg/command/command.go
@@ -0,0 +1,93 @@
+package command
+
+import (
+	"flag"
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+// Config is the driver configuration struct
+type Config struct {
+	Endpoint string // CSI endpoint
+	NodeID   string // CSI node ID
+	Name     string // CSI driver name
+	Version  string // CSI driver version
+	Desc     string // CSI driver description
+
+	RestURL    string // GD2 endpoint
+	RestUser   string // GD2 user name with access to create and delete volumes
+	RestSecret string // GD2 user password
+
+	CmdName string // Executable name
+}
+
+// NewConfig returns a config struct used to initialize a new driver
+func NewConfig(cmdName, CSIDriverName, CSIDriverVersion, CSIDriverDesc string) *Config {
+	return &Config{
+		Name:    CSIDriverName,
+		Version: CSIDriverVersion,
+		Desc:    CSIDriverDesc,
+		CmdName: cmdName,
+	}
+}
+
+// Driver interface
+type Driver interface {
+	Run()
+}
+
+// Init configures logging for the driver executable
+func Init() {
+	// #nosec
+	_ = flag.Set("logtostderr", "true")
+}
+
+// InitCommand parses flags and sets up the cobra Command
+func InitCommand(config
*Config, newDriver Driver) *cobra.Command { + // #nosec + _ = flag.CommandLine.Parse([]string{}) + + cmd := &cobra.Command{ + Use: config.Name, + Short: config.Desc, + Run: func(cmd *cobra.Command, args []string) { + newDriver.Run() + }, + } + + cmd.Flags().AddGoFlagSet(flag.CommandLine) + + cmd.PersistentFlags().StringVar(&config.NodeID, "nodeid", "", "CSI node id") + cmd.PersistentFlags().StringVar(&config.Endpoint, "endpoint", "", "CSI endpoint") + cmd.PersistentFlags().StringVar(&config.RestURL, "resturl", "", "glusterd2 rest endpoint") + cmd.PersistentFlags().StringVar(&config.RestUser, "username", "glustercli", "glusterd2 user name") + cmd.PersistentFlags().StringVar(&config.RestSecret, "restsecret", "", "glusterd2 rest user secret") + + if config.NodeID == "" { + config.NodeID = os.Getenv("CSI_NODE_ID") + } + if config.Endpoint == "" { + config.Endpoint = os.Getenv("CSI_ENDPOINT") + } + if config.RestURL == "" { + config.RestURL = os.Getenv("REST_URL") + } + if config.RestUser == "" { + config.RestUser = os.Getenv("REST_USER") + } + if config.RestSecret == "" { + config.RestSecret = os.Getenv("REST_SECRET") + } + + return cmd +} + +// Run driver executable function +func Run(config *Config, cmd *cobra.Command) { + if err := cmd.Execute(); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "%s", err.Error()) + os.Exit(1) + } +} diff --git a/pkg/controller/controllerserver.go b/pkg/controller/controllerserver.go new file mode 100644 index 000000000..e3635a8f7 --- /dev/null +++ b/pkg/controller/controllerserver.go @@ -0,0 +1,434 @@ +package controller + +import ( + "context" + "fmt" + "strings" + + "github.com/gluster/gluster-csi-driver/pkg/client" + "github.com/gluster/gluster-csi-driver/pkg/command" + "github.com/gluster/gluster-csi-driver/pkg/identity" + + api "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/golang/glog" + csi "github.com/kubernetes-csi/drivers/pkg/csi-common" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Server takes a configuration and a GlusterClients cache +type Server struct { + *command.Config + client client.GlusterClient +} + +// Run starts the controller server +func Run(config *command.Config, cli client.GlusterClient) { + srv := csi.NewNonBlockingGRPCServer() + srv.Start(config.Endpoint, identity.NewServer(config), NewServer(config, cli), nil) + srv.Wait() +} + +// NewServer instantiates a Server +func NewServer(config *command.Config, cli client.GlusterClient) *Server { + cs := &Server{ + Config: config, + client: cli, + } + + return cs +} + +// CreateVolume creates and starts the volume +func (cs *Server) CreateVolume(ctx context.Context, req *api.CreateVolumeRequest) (*api.CreateVolumeResponse, error) { + glog.V(2).Infof("request received %+v", req) + + if err := cs.validateCreateVolumeReq(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + volumeName := req.GetName() + + // If capacity mentioned, pick that or use default size 1 GB + volSizeBytes := client.GB + if capRange := req.GetCapacityRange(); capRange != nil { + volSizeBytes = capRange.GetRequiredBytes() + } + + gc := cs.client + + glog.V(1).Infof("creating volume with name %s", volumeName) + + err := gc.CheckExistingVolume(volumeName, volSizeBytes) + if err != nil { + if !client.IsErrNotFound(err) { + glog.Error(err.Error()) + return nil, status.Error(codes.AlreadyExists, err.Error()) + } + + err = cs.doCreateVolume(gc, req, volSizeBytes) + if err != nil { + return nil, err + } + } else { + 
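+		// A compatible volume with this name already exists (CheckExistingVolume
+		// validated owner and size), so reuse it rather than provisioning.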
glog.V(1).Infof("requested volume %s already exists in the gluster cluster", volumeName) + } + + glusterServers, err := gc.GetClusterNodes(volumeName) + if err != nil { + glog.Errorf("failed to get cluster nodes for %s: %v", volumeName, err) + return nil, status.Errorf(codes.Internal, "failed to get cluster nodes for %s: %v", volumeName, err) + } + glog.V(2).Infof("gluster servers: %+v", glusterServers) + + params := map[string]string{} + params["glustervol"] = volumeName + params["glusterserver"] = glusterServers[0] + params["glusterbkpservers"] = strings.Join(glusterServers[1:], ":") + + resp := &api.CreateVolumeResponse{ + Volume: &api.Volume{ + Id: volumeName, + CapacityBytes: volSizeBytes, + Attributes: params, + }, + } + + glog.V(4).Infof("CSI CreateVolume response: %+v", resp) + return resp, nil +} + +func (cs *Server) doCreateVolume(gc client.GlusterClient, req *api.CreateVolumeRequest, volSizeBytes int64) error { + volumeName := req.GetName() + reqParams := req.GetParameters() + snapName := req.GetVolumeContentSource().GetSnapshot().GetId() + + if snapName != "" { + _, err := gc.CheckExistingSnapshot(snapName, volumeName) + if err != nil { + glog.Error(err.Error()) + code := codes.Internal + if client.IsErrExists(err) { + code = codes.AlreadyExists + } else if client.IsErrNotFound(err) { + code = codes.NotFound + } + return status.Error(code, err.Error()) + } + + err = gc.CloneSnapshot(snapName, volumeName) + if err != nil { + glog.Error(err.Error()) + return status.Error(codes.Internal, err.Error()) + } + } else { + // If volume does not exist, provision volume + glog.V(4).Infof("received request to create volume %s with size %d", volumeName, volSizeBytes) + err := gc.CreateVolume(volumeName, volSizeBytes, reqParams) + if err != nil { + glog.Errorf("failed to create volume: %v", err) + return status.Errorf(codes.Internal, "failed to create volume: %v", err) + } + } + + return nil +} + +func (cs *Server) validateCreateVolumeReq(req *api.CreateVolumeRequest) error { + if req == nil { + return fmt.Errorf("request cannot be empty") + } + if req.GetName() == "" { + return fmt.Errorf("Name is a required field") + } + if reqCaps := req.GetVolumeCapabilities(); reqCaps == nil { + return fmt.Errorf("VolumeCapabilities is a required field") + } + return nil +} + +// DeleteVolume deletes the given volume. 
+func (cs *Server) DeleteVolume(ctx context.Context, req *api.DeleteVolumeRequest) (*api.DeleteVolumeResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "volume delete request is nil") + } + + volumeID := req.GetVolumeId() + if volumeID == "" { + return nil, status.Error(codes.InvalidArgument, "VolumeId is a required field") + } + glog.V(2).Infof("deleting volume with ID: %s", volumeID) + + gc := cs.client + err := gc.DeleteVolume(volumeID) + if err != nil && !client.IsErrNotFound(err) { + glog.Errorf("error deleting volume %s: %v", volumeID, err) + return nil, status.Errorf(codes.Internal, "error deleting volume %s: %v", volumeID, err) + } else if client.IsErrNotFound(err) { + glog.Warningf("volume %s not found", volumeID) + } + + glog.Infof("successfully deleted volume %s", volumeID) + return &api.DeleteVolumeResponse{}, nil +} + +// ControllerPublishVolume return Unimplemented error +func (cs *Server) ControllerPublishVolume(ctx context.Context, req *api.ControllerPublishVolumeRequest) (*api.ControllerPublishVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerUnpublishVolume return Unimplemented error +func (cs *Server) ControllerUnpublishVolume(ctx context.Context, req *api.ControllerUnpublishVolumeRequest) (*api.ControllerUnpublishVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + +// ValidateVolumeCapabilities checks whether the volume capabilities requested +// are supported. +func (cs *Server) ValidateVolumeCapabilities(ctx context.Context, req *api.ValidateVolumeCapabilitiesRequest) (*api.ValidateVolumeCapabilitiesResponse, error) { + if err := cs.validateVolumeCapabilitiesReq(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + volumeID := req.GetVolumeId() + reqCaps := req.GetVolumeCapabilities() + + gc := cs.client + err := gc.CheckExistingVolume(volumeID, 0) + if err != nil { + code := codes.Internal + if client.IsErrNotFound(err) { + code = codes.NotFound + } + glog.Error(err.Error()) + return nil, status.Error(code, err.Error()) + } + + var vcaps []*api.VolumeCapability_AccessMode + for _, mode := range []api.VolumeCapability_AccessMode_Mode{ + api.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + api.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, + api.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY, + api.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + } { + vcaps = append(vcaps, &api.VolumeCapability_AccessMode{Mode: mode}) + } + capSupport := true + IsSupport := func(mode api.VolumeCapability_AccessMode_Mode) bool { + for _, m := range vcaps { + if mode == m.Mode { + return true + } + } + return false + } + for _, cap := range reqCaps { + if !IsSupport(cap.AccessMode.Mode) { + capSupport = false + } + } + + resp := &api.ValidateVolumeCapabilitiesResponse{ + Supported: capSupport, + } + glog.V(1).Infof("GlusterFS CSI driver volume capabilities: %v", resp) + return resp, nil +} + +func (cs *Server) validateVolumeCapabilitiesReq(req *api.ValidateVolumeCapabilitiesRequest) error { + if req == nil { + return fmt.Errorf("request cannot be empty") + } + if req.GetVolumeId() == "" { + return fmt.Errorf("VolumeId is a required field") + } + if req.GetVolumeCapabilities() == nil { + return fmt.Errorf("VolumeCapabilities is a required field") + } + return nil +} + +// ListVolumes returns a list of volumes +func (cs *Server) ListVolumes(ctx context.Context, req *api.ListVolumesRequest) (*api.ListVolumesResponse, error) { + 
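+	// Parse the pagination window from the starting token and max entries;
+	// entries[start:end] and the next token are computed via listGetEnd below.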
+	start, end, err := listParseRange(req.GetStartingToken(), req.GetMaxEntries())
+	if err != nil {
+		glog.Error(err)
+		return nil, status.Error(codes.Aborted, err.Error())
+	}
+
+	entries := []*api.ListVolumesResponse_Entry{}
+
+	gc := cs.client
+	vols, err := gc.ListVolumes()
+	if err != nil {
+		glog.Error(err)
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	for _, vol := range vols {
+		entries = append(entries, &api.ListVolumesResponse_Entry{Volume: vol})
+	}
+
+	end, endStr, err := listGetEnd(len(entries), start, end)
+	if err != nil {
+		glog.Error(err)
+		return nil, status.Error(codes.Aborted, err.Error())
+	}
+	resp := &api.ListVolumesResponse{
+		Entries:   entries[start:end],
+		NextToken: endStr,
+	}
+	return resp, nil
+}
+
+// GetCapacity is not supported and returns Unimplemented
+func (cs *Server) GetCapacity(ctx context.Context, req *api.GetCapacityRequest) (*api.GetCapacityResponse, error) {
+	return nil, status.Error(codes.Unimplemented, "")
+}
+
+// ControllerGetCapabilities returns the capabilities of the controller service.
+func (cs *Server) ControllerGetCapabilities(ctx context.Context, req *api.ControllerGetCapabilitiesRequest) (*api.ControllerGetCapabilitiesResponse, error) {
+	newCap := func(cap api.ControllerServiceCapability_RPC_Type) *api.ControllerServiceCapability {
+		return &api.ControllerServiceCapability{
+			Type: &api.ControllerServiceCapability_Rpc{
+				Rpc: &api.ControllerServiceCapability_RPC{
+					Type: cap,
+				},
+			},
+		}
+	}
+
+	var caps []*api.ControllerServiceCapability
+	for _, cap := range []api.ControllerServiceCapability_RPC_Type{
+		api.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
+		api.ControllerServiceCapability_RPC_LIST_VOLUMES,
+		api.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
+		api.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
+	} {
+		caps = append(caps, newCap(cap))
+	}
+
+	resp := &api.ControllerGetCapabilitiesResponse{
+		Capabilities: caps,
+	}
+
+	return resp, nil
+}
+
+// CreateSnapshot creates a snapshot of an existing PV
+func (cs *Server) CreateSnapshot(ctx context.Context, req *api.CreateSnapshotRequest) (*api.CreateSnapshotResponse, error) {
+	if err := cs.validateCreateSnapshotReq(req); err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	}
+
+	snapName := req.GetName()
+	srcVol := req.GetSourceVolumeId()
+
+	glog.V(2).Infof("received request to create snapshot %s from volume %s", snapName, srcVol)
+
+	gc := cs.client
+	err := gc.CheckExistingVolume(srcVol, 0)
+	if err != nil {
+		code := codes.Internal
+		if client.IsErrNotFound(err) {
+			// A missing source volume is NOT_FOUND per the CSI spec
+			code = codes.NotFound
+		}
+		glog.Error(err)
+		return nil, status.Errorf(code, "error finding volume %s for snapshot %s: %v", srcVol, snapName, err)
+	}
+
+	snap, err := gc.CheckExistingSnapshot(snapName, srcVol)
+	if err != nil {
+		if !client.IsErrNotFound(err) {
+			code := codes.Internal
+			if client.IsErrExists(err) {
+				code = codes.AlreadyExists
+			}
+			glog.Error(err.Error())
+			return nil, status.Error(code, err.Error())
+		}
+
+		snap, err = gc.CreateSnapshot(snapName, srcVol)
+		if err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
+	}
+
+	return &api.CreateSnapshotResponse{Snapshot: snap}, nil
+}
+
+func (cs *Server) validateCreateSnapshotReq(req *api.CreateSnapshotRequest) error {
+	if req == nil {
+		return fmt.Errorf("request cannot be empty")
+	}
+	if req.GetName() == "" {
+		return fmt.Errorf("Name is a required field")
+	}
+	if req.GetSourceVolumeId() == "" {
+		return fmt.Errorf("SourceVolumeId is a required field")
+	}
+
if req.GetName() == req.GetSourceVolumeId() { + return fmt.Errorf("SourceVolumeId and Name cannot be same") + } + return nil +} + +// DeleteSnapshot delete provided snapshot of a PV +func (cs *Server) DeleteSnapshot(ctx context.Context, req *api.DeleteSnapshotRequest) (*api.DeleteSnapshotResponse, error) { + if req == nil { + return nil, status.Errorf(codes.InvalidArgument, "DeleteSnapshot request is nil") + } + if req.GetSnapshotId() == "" { + return nil, status.Error(codes.InvalidArgument, "DeleteSnapshot - snapshotId is empty") + } + glog.V(4).Infof("deleting snapshot %s", req.GetSnapshotId()) + + snapName := req.GetSnapshotId() + + gc := cs.client + err := gc.DeleteSnapshot(snapName) + if err != nil && !client.IsErrNotFound(err) { + glog.Error(err) + return nil, status.Errorf(codes.Internal, "error deleting snapshot %s: %v", snapName, err) + } else if client.IsErrNotFound(err) { + glog.Warningf("snapshot %s not found", snapName) + } + + glog.Infof("successfully deleted snapshot %s", snapName) + return &api.DeleteSnapshotResponse{}, nil +} + +// ListSnapshots list the snapshots of a PV +func (cs *Server) ListSnapshots(ctx context.Context, req *api.ListSnapshotsRequest) (*api.ListSnapshotsResponse, error) { + start, end, err := listParseRange(req.GetStartingToken(), req.GetMaxEntries()) + if err != nil { + glog.Error(err) + return nil, status.Error(codes.Aborted, err.Error()) + } + + entries := []*api.ListSnapshotsResponse_Entry{} + snapName := req.GetSnapshotId() + srcVol := req.GetSourceVolumeId() + + gc := cs.client + snaps, err := gc.ListSnapshots(snapName, srcVol) + if err != nil { + glog.Errorf("failed to list snapshots: %v", err) + return nil, status.Errorf(codes.Internal, "failed to list snapshots: %v", err) + } + + for _, snap := range snaps { + entries = append(entries, &api.ListSnapshotsResponse_Entry{Snapshot: snap}) + } + + end, endStr, err := listGetEnd(len(entries), start, end) + if err != nil { + glog.Error(err) + return nil, status.Error(codes.Aborted, err.Error()) + } + resp := &api.ListSnapshotsResponse{ + Entries: entries[start:end], + NextToken: endStr, + } + return resp, err +} diff --git a/pkg/controller/util.go b/pkg/controller/util.go new file mode 100644 index 000000000..a27ebdca5 --- /dev/null +++ b/pkg/controller/util.go @@ -0,0 +1,48 @@ +package controller + +import ( + "fmt" + "strconv" +) + +func listParseRange(startStr string, end int32) (int32, int32, error) { + if len(startStr) == 0 { + startStr = "0" + } + s, err := strconv.ParseInt(startStr, 0, 32) + if err != nil { + return 0, 0, fmt.Errorf("invalid starting token: %s", startStr) + } + start := int32(s) + + if end > 0 { + end = start + end + } else { + end = int32(0) + } + + return start, end, nil +} + +func listGetEnd(length int, start, end int32) (int32, string, error) { + endStr := "" + listLen := int32(length) + if listLen != 0 { + s := start + if s < 0 { + s = -s + } + if s >= listLen { + return 0, "", fmt.Errorf("starting token %d greater than list length", start) + } + if end >= listLen || end <= 0 { + end = listLen + } else { + endStr = fmt.Sprintf("%d", end) + } + } else { + end = int32(0) + } + + return end, endStr, nil +} diff --git a/pkg/glusterfs/client.go b/pkg/glusterfs/client.go new file mode 100644 index 000000000..49f3225de --- /dev/null +++ b/pkg/glusterfs/client.go @@ -0,0 +1,343 @@ +package glusterfs + +import ( + "fmt" + "net/http" + "strconv" + "strings" + + cli "github.com/gluster/gluster-csi-driver/pkg/client" + "github.com/gluster/gluster-csi-driver/pkg/command" + + csi 
"github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/gluster/glusterd2/pkg/api" + gd2Error "github.com/gluster/glusterd2/pkg/errors" + "github.com/gluster/glusterd2/pkg/restclient" + "github.com/golang/glog" +) + +const ( + gd2DefaultInsecure = "false" + volumeOwnerAnn = "VolumeOwner" + defaultReplicaCount = 3 +) + +type gd2Client struct { + client *restclient.Client + driverName string + url string + username string + password string + caCert string + insecure string + insecureBool bool +} + +// NewClient returns a new gd2Client +func NewClient(config *command.Config) (cli.GlusterClient, error) { + var err error + + client := gd2Client{ + driverName: config.Name, + url: config.RestURL, + username: config.RestUser, + password: config.RestSecret, + } + + client.insecure, client.insecureBool = client.setInsecure(gd2DefaultInsecure) + client, err = client.setClient(client) + + return client, err +} + +func (gc gd2Client) checkRespErr(orig error, kind, name string) error { + errResp := gc.client.LastErrorResponse() + //errResp will be nil in case of No route to host error + if errResp != nil && errResp.StatusCode == http.StatusNotFound { + return cli.ErrNotFound(kind, name) + } + return orig +} + +func (gc gd2Client) setInsecure(new string) (string, bool) { + insecure := cli.SetStringIfEmpty(gc.insecure, new) + insecureBool, err := strconv.ParseBool(gc.insecure) + if err != nil { + glog.Errorf("bad value [%s] for glusterd2insecure, using default [%s]", gc.insecure, gd2DefaultInsecure) + insecure = gd2DefaultInsecure + insecureBool, err = strconv.ParseBool(gd2DefaultInsecure) + if err != nil { + panic(err) + } + } + + return insecure, insecureBool +} + +func (gc gd2Client) setClient(client gd2Client) (gd2Client, error) { + if gc.client == nil { + gd2, err := restclient.New(client.url, client.username, client.password, client.caCert, client.insecureBool) + if err != nil { + return gd2Client{}, fmt.Errorf("failed to create glusterd2 REST client: %s", err) + } + err = gd2.Ping() + if err != nil { + return gd2Client{}, fmt.Errorf("error finding glusterd2 server at %s: %v", gc.url, err) + } + + gc.client = gd2 + } + + client.client = gc.client + return client, nil +} + +// GetClusterNodes retrieves a list of cluster peer nodes +func (gc gd2Client) GetClusterNodes(volumeID string) ([]string, error) { + glusterServers := []string{} + + peers, err := gc.client.Peers() + if err != nil { + return nil, err + } + + for _, p := range peers { + for _, a := range p.PeerAddresses { + ip := strings.Split(a, ":") + glusterServers = append(glusterServers, ip[0]) + } + } + + if len(glusterServers) == 0 { + return nil, fmt.Errorf("no hosts found for %s / %s", gc.url, gc.username) + } + + return glusterServers, nil +} + +// CheckExistingVolume checks whether a given volume already exists +func (gc gd2Client) CheckExistingVolume(volumeID string, volSizeBytes int64) error { + vol, err := gc.client.Volumes(volumeID) + if err != nil { + return gc.checkRespErr(err, "volume", volumeID) + } + + volInfo := vol[0] + // Do the owner validation + if glusterAnnVal, found := volInfo.Metadata[volumeOwnerAnn]; !found || glusterAnnVal != gc.driverName { + return cli.ErrExists("volume", volInfo.Name, "owner", glusterAnnVal) + } + + // Check requested capacity is the same as existing capacity + if volSizeBytes > 0 && volInfo.Capacity != uint64(cli.RoundUpToMiB(volSizeBytes)) { + return cli.ErrExists("volume", volInfo.Name, "size (MiB)", strconv.FormatUint(volInfo.Capacity, 10)) + } + + // If volume not started, 
start the volume
+	if volInfo.State != api.VolStarted {
+		err := gc.client.VolumeStart(volInfo.Name, true)
+		if err != nil {
+			return fmt.Errorf("failed to start volume %s: %v", volInfo.Name, err)
+		}
+	}
+
+	glog.Infof("requested volume %s already exists in the gluster cluster", volumeID)
+
+	return nil
+}
+
+// CreateVolume creates a new volume
+func (gc gd2Client) CreateVolume(volumeName string, volSizeBytes int64, params map[string]string) error {
+	volMetaMap := make(map[string]string)
+	volMetaMap[volumeOwnerAnn] = gc.driverName
+	volumeReq := api.VolCreateReq{
+		Name:         volumeName,
+		Metadata:     volMetaMap,
+		ReplicaCount: defaultReplicaCount,
+		Size:         uint64(cli.RoundUpToMiB(volSizeBytes)),
+	}
+
+	glog.V(2).Infof("volume create request: %+v", volumeReq)
+	volumeCreateResp, err := gc.client.VolumeCreate(volumeReq)
+	if err != nil {
+		return fmt.Errorf("failed to create volume %s: %v", volumeName, err)
+	}
+
+	glog.V(3).Infof("volume create response: %+v", volumeCreateResp)
+	err = gc.client.VolumeStart(volumeName, true)
+	if err != nil {
+		// Don't delete the volume if starting it fails: a retried
+		// CreateVolume finds the existing volume and attempts the
+		// start again before a response is sent.
+		return fmt.Errorf("failed to start volume %s: %v", volumeName, err)
+	}
+
+	return nil
+}
+
+// DeleteVolume deletes a volume
+func (gc gd2Client) DeleteVolume(volumeID string) error {
+	err := gc.client.VolumeStop(volumeID)
+	if err != nil && err.Error() != gd2Error.ErrVolAlreadyStopped.Error() {
+		return gc.checkRespErr(err, "volume", volumeID)
+	}
+
+	err = gc.client.VolumeDelete(volumeID)
+	if err != nil {
+		return gc.checkRespErr(err, "volume", volumeID)
+	}
+
+	return nil
+}
+
+// ListVolumes lists all volumes in the cluster
+func (gc gd2Client) ListVolumes() ([]*csi.Volume, error) {
+	volumes := []*csi.Volume{}
+
+	vols, err := gc.client.Volumes("")
+	if err != nil {
+		return nil, err
+	}
+
+	for _, vol := range vols {
+		v, err := gc.client.VolumeStatus(vol.Name)
+		if err != nil {
+			glog.V(1).Infof("error getting volume %s status: %s", vol.Name, err)
+			continue
+		}
+		volumes = append(volumes, &csi.Volume{
+			Id:            vol.Name,
+			CapacityBytes: (int64(v.Size.Capacity)) * cli.MB,
+		})
+	}
+
+	return volumes, nil
+}
+
+func (gc *gd2Client) csiSnap(snap api.SnapInfo) *csi.Snapshot {
+	return &csi.Snapshot{
+		Id:             snap.VolInfo.Name,
+		SourceVolumeId: snap.ParentVolName,
+		CreatedAt:      snap.CreatedAt.Unix(),
+		SizeBytes:      (int64(snap.VolInfo.Capacity)) * cli.MB,
+		Status: &csi.SnapshotStatus{
+			Type: csi.SnapshotStatus_READY,
+		},
+	}
+}
+
+// CheckExistingSnapshot checks if a snapshot exists in the TSP
+func (gc gd2Client) CheckExistingSnapshot(snapName, volName string) (*csi.Snapshot, error) {
+	snapInfo, err := gc.client.SnapshotInfo(snapName)
+	if err != nil {
+		return nil, gc.checkRespErr(err, "snapshot", snapName)
+	}
+	if len(volName) != 0 && snapInfo.ParentVolName != volName {
+		return nil, cli.ErrExists("snapshot", snapName, "parent volume", snapInfo.ParentVolName)
+	}
+
+	if snapInfo.VolInfo.State != api.VolStarted {
+		actReq := api.SnapActivateReq{
+			Force: true,
+		}
+		err = gc.client.SnapshotActivate(actReq, snapName)
+		if err != nil {
+			return nil, fmt.Errorf("failed to activate snapshot: %v", err)
+		}
+	}
+
+	return gc.csiSnap(api.SnapInfo(snapInfo)), nil
+}
+
+// CreateSnapshot creates a snapshot of an existing volume
+func (gc gd2Client) CreateSnapshot(snapName, srcVol string) (*csi.Snapshot, error) {
+	snapReq := api.SnapCreateReq{
+		VolName:  srcVol,
+		SnapName: snapName,
+		Force:    true,
+	}
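+	// Create the snapshot, then activate it so it is immediately usable
+	// for cloning and reported as READY.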
glog.V(2).Infof("snapshot request: %+v", snapReq) + snapInfo, err := gc.client.SnapshotCreate(snapReq) + if err != nil { + return nil, fmt.Errorf("failed to create snapshot %s: %v", snapName, err) + } + + err = gc.client.SnapshotActivate(api.SnapActivateReq{Force: true}, snapName) + if err != nil { + return nil, fmt.Errorf("failed to activate snapshot %s: %v", snapName, err) + } + + return gc.csiSnap(api.SnapInfo(snapInfo)), nil +} + +// CloneSnapshot creates a clone of a snapshot +func (gc gd2Client) CloneSnapshot(snapName, volName string) error { + var snapreq api.SnapCloneReq + + glog.V(2).Infof("creating volume from snapshot %s", snapName) + snapreq.CloneName = volName + snapResp, err := gc.client.SnapshotClone(snapName, snapreq) + if err != nil { + return fmt.Errorf("failed to create volume clone: %v", err) + } + err = gc.client.VolumeStart(volName, true) + if err != nil { + return fmt.Errorf("failed to start volume: %v", err) + } + glog.V(1).Infof("snapshot clone response: %+v", snapResp) + return nil +} + +// DeleteSnapshot deletes a snapshot +func (gc gd2Client) DeleteSnapshot(snapName string) error { + err := gc.client.SnapshotDeactivate(snapName) + if err != nil { + //if errResp != nil && errResp.StatusCode != http.StatusNotFound && err.Error() != gd2Error.ErrSnapDeactivated.Error() { + return gc.checkRespErr(err, "snapshot", snapName) + } + err = gc.client.SnapshotDelete(snapName) + if err != nil { + return gc.checkRespErr(err, "snapshot", snapName) + } + return nil +} + +// ListSnapshots lists all snapshots +func (gc gd2Client) ListSnapshots(snapName, srcVol string) ([]*csi.Snapshot, error) { + var snaps []*csi.Snapshot + var snaplist api.SnapListResp + + if len(snapName) != 0 { + // Get snapshot by name + snap, err := gc.client.SnapshotInfo(snapName) + if err == nil { + snapInfo := api.SnapInfo(snap) + snaplist = append(snaplist, api.SnapList{ParentName: snapInfo.ParentVolName, SnapList: []api.SnapInfo{snapInfo}}) + } + } else if len(srcVol) != 0 { + // Get all snapshots for source volume + snaps, err := gc.client.SnapshotList(srcVol) + if err != nil { + errResp := gc.client.LastErrorResponse() + if errResp != nil && errResp.StatusCode != http.StatusNotFound { + return nil, fmt.Errorf("[%s/%s]: %v", gc.url, gc.username, err) + } + } + snaplist = append(snaplist, snaps...) + } else { + // Get all snapshots in TSP + snaps, err := gc.client.SnapshotList("") + if err != nil { + return nil, fmt.Errorf("[%s/%s]: %v", gc.url, gc.username, err) + } + snaplist = append(snaplist, snaps...) 
+ } + + for _, snap := range snaplist { + for _, s := range snap.SnapList { + snaps = append(snaps, gc.csiSnap(s)) + } + + } + return snaps, nil +} diff --git a/pkg/glusterfs/controllerserver.go b/pkg/glusterfs/controllerserver.go deleted file mode 100644 index 0592b43f1..000000000 --- a/pkg/glusterfs/controllerserver.go +++ /dev/null @@ -1,702 +0,0 @@ -package glusterfs - -import ( - "context" - "errors" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/gluster/gluster-csi-driver/pkg/glusterfs/utils" - - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/gluster/glusterd2/pkg/api" - gd2Error "github.com/gluster/glusterd2/pkg/errors" - "github.com/golang/glog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - volumeOwnerAnn = "VolumeOwner" - defaultVolumeSize int64 = 1000 * utils.MB // default volume size ie 1 GB - defaultReplicaCount = 3 -) - -var errVolumeNotFound = errors.New("volume not found") - -// ControllerServer struct of GlusterFS CSI driver with supported methods of CSI controller server spec. -type ControllerServer struct { - *GfDriver -} - -// CsiDrvParam stores csi driver specific request parameters. -// This struct will be used to gather specific fields of CSI driver: -// For eg. csiDrvName, csiDrvVersion..etc -// and also gather parameters passed from SC which not part of gluster volcreate api. -// GlusterCluster - The resturl of gluster cluster -// GlusterUser - The gluster username who got access to the APIs. -// GlusterUserToken - The password/token of glusterUser to connect to glusterCluster -// GlusterVersion - Says the version of the glustercluster running in glusterCluster endpoint. -// All of these fields are optional and can be used if needed. -type CsiDrvParam struct { - GlusterCluster string - GlusterUser string - GlusterUserToken string - GlusterVersion string - CsiDrvName string - CsiDrvVersion string -} - -// ProvisionerConfig is the combined configuration of gluster cli vol create request and CSI driver specific input -type ProvisionerConfig struct { - gdVolReq *api.VolCreateReq - //csiConf *CsiDrvParam -} - -// ParseCreateVolRequest parse incoming volume create request and fill ProvisionerConfig. -func (cs *ControllerServer) ParseCreateVolRequest(req *csi.CreateVolumeRequest) (*ProvisionerConfig, error) { - - var reqConf ProvisionerConfig - var gdReq api.VolCreateReq - - reqConf.gdVolReq = &gdReq - - // Get Volume name - if req != nil { - reqConf.gdVolReq.Name = req.Name - } - return &reqConf, nil -} - -// CreateVolume creates and starts the volume -func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { - glog.V(2).Infof("request received %+v", req) - - if err := cs.validateCreateVolumeReq(req); err != nil { - return nil, err - } - - glog.V(1).Infof("creating volume with name %s", req.Name) - - volSizeBytes := cs.getVolumeSize(req) - volSizeMB := int(utils.RoundUpSize(volSizeBytes, 1024*1024)) - - // parse the request. 
- parseResp, err := cs.ParseCreateVolRequest(req) - if err != nil { - return nil, status.Error(codes.InvalidArgument, "failed to parse request") - } - - volumeName := parseResp.gdVolReq.Name - - err = cs.checkExistingVolume(volumeName, volSizeMB) - if err != nil { - if err != errVolumeNotFound { - glog.Errorf("error checking for pre-existing volume: %v", err) - return nil, err - } - - if req.VolumeContentSource.GetSnapshot().GetId() != "" { - snapName := req.VolumeContentSource.GetSnapshot().GetId() - glog.V(2).Infof("creating volume from snapshot %s", snapName) - err = cs.checkExistingSnapshot(snapName, req.GetName()) - if err != nil { - return nil, err - } - } else { - // If volume does not exist, provision volume - err = cs.doVolumeCreate(volumeName, volSizeMB) - if err != nil { - return nil, err - } - } - } - err = cs.client.VolumeStart(volumeName, true) - if err != nil { - //we dont need to delete the volume if volume start fails - //as we are listing the volumes and starting it again - //before sending back the response - glog.Errorf("failed to start volume: %v", err) - return nil, status.Errorf(codes.Internal, "failed to start volume: %v", err) - } - - glusterServer, bkpServers, err := cs.getClusterNodes() - if err != nil { - glog.Errorf("failed to get cluster nodes: %v", err) - return nil, status.Errorf(codes.Internal, "failed to get cluster nodes: %v", err) - } - - resp := &csi.CreateVolumeResponse{ - Volume: &csi.Volume{ - Id: volumeName, - CapacityBytes: volSizeBytes, - Attributes: map[string]string{ - "glustervol": volumeName, - "glusterserver": glusterServer, - "glusterbkpservers": strings.Join(bkpServers, ":"), - }, - }, - } - - glog.V(4).Infof("CSI volume response: %+v", resp) - return resp, nil -} - -func (cs *ControllerServer) getVolumeSize(req *csi.CreateVolumeRequest) int64 { - // If capacity mentioned, pick that or use default size 1 GB - volSizeBytes := defaultVolumeSize - if capRange := req.GetCapacityRange(); capRange != nil { - volSizeBytes = capRange.GetRequiredBytes() - } - return volSizeBytes -} - -func (cs *ControllerServer) checkExistingSnapshot(snapName, volName string) error { - snapInfo, err := cs.GfDriver.client.SnapshotInfo(snapName) - if err != nil { - errResp := cs.client.LastErrorResponse() - //errResp will be nil in case of No route to host error - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - return status.Errorf(codes.NotFound, "failed to get snapshot info %s", err.Error()) - } - return status.Error(codes.Internal, err.Error()) - } - - if snapInfo.VolInfo.State != api.VolStarted { - actReq := api.SnapActivateReq{ - Force: true, - } - err = cs.client.SnapshotActivate(actReq, snapName) - if err != nil { - glog.Errorf("failed to activate snapshot: %v", err) - return status.Errorf(codes.Internal, "failed to activate snapshot %s", err.Error()) - } - } - //create snapshot clone - err = cs.createSnapshotClone(snapName, volName) - return err -} - -func (cs *ControllerServer) createSnapshotClone(snapName, volName string) error { - var snapreq api.SnapCloneReq - snapreq.CloneName = volName - snapResp, err := cs.client.SnapshotClone(snapName, snapreq) - if err != nil { - glog.Errorf("failed to create volume clone: %v", err) - return status.Errorf(codes.Internal, "failed to create volume clone: %s", err.Error()) - } - glog.V(1).Infof("snapshot clone response : %+v", snapResp) - return nil -} - -func (cs *ControllerServer) validateCreateVolumeReq(req *csi.CreateVolumeRequest) error { - if req == nil { - return 
status.Errorf(codes.InvalidArgument, "request cannot be empty") - } - - if req.GetName() == "" { - return status.Error(codes.InvalidArgument, "name is a required field") - } - - if reqCaps := req.GetVolumeCapabilities(); reqCaps == nil { - return status.Error(codes.InvalidArgument, "volume capabilities is a required field") - } - - return nil -} - -func (cs *ControllerServer) doVolumeCreate(volumeName string, volSizeMB int) error { - glog.V(4).Infof("received request to create volume %s with size %d", volumeName, volSizeMB) - volMetaMap := make(map[string]string) - volMetaMap[volumeOwnerAnn] = glusterfsCSIDriverName - volumeReq := api.VolCreateReq{ - Name: volumeName, - Metadata: volMetaMap, - ReplicaCount: defaultReplicaCount, - Size: uint64(volSizeMB), - } - - glog.V(2).Infof("volume create request: %+v", volumeReq) - volumeCreateResp, err := cs.client.VolumeCreate(volumeReq) - if err != nil { - glog.Errorf("failed to create volume: %v", err) - return status.Errorf(codes.Internal, "failed to create volume: %v", err) - } - - glog.V(3).Infof("volume create response : %+v", volumeCreateResp) - return nil -} - -func (cs *ControllerServer) checkExistingVolume(volumeName string, volSizeMB int) error { - vol, err := cs.client.Volumes(volumeName) - if err != nil { - glog.Errorf("failed to fetch volume : %v", err) - errResp := cs.client.LastErrorResponse() - //errResp will be nil in case of `No route to host` error - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - return errVolumeNotFound - } - return status.Errorf(codes.Internal, "error in fetching volume details %v", err) - } - - volInfo := vol[0] - // Do the owner validation - if glusterAnnVal, found := volInfo.Metadata[volumeOwnerAnn]; !found || (found && glusterAnnVal != glusterfsCSIDriverName) { - return status.Errorf(codes.Internal, "volume %s (%s) is not owned by GlusterFS CSI driver", - volInfo.Name, volInfo.Metadata) - } - - if int(volInfo.Capacity) != volSizeMB { - return status.Errorf(codes.AlreadyExists, "volume %s already exits with different size: %d", volInfo.Name, volInfo.Capacity) - } - - //volume has not started, start the volume - if volInfo.State != api.VolStarted { - err = cs.client.VolumeStart(volInfo.Name, true) - if err != nil { - return status.Errorf(codes.Internal, "failed to start volume %s: %v", volInfo.Name, err) - } - } - - glog.Infof("requested volume %s already exists in the gluster cluster", volumeName) - - return nil -} - -func (cs *ControllerServer) getClusterNodes() (string, []string, error) { - peers, err := cs.client.Peers() - if err != nil { - return "", nil, err - } - glusterServer := "" - bkpservers := []string{} - - for i, p := range peers { - if i == 0 { - for _, a := range p.PeerAddresses { - ip := strings.Split(a, ":") - glusterServer = ip[0] - } - - continue - } - for _, a := range p.PeerAddresses { - ip := strings.Split(a, ":") - bkpservers = append(bkpservers, ip[0]) - } - - } - glog.V(2).Infof("primary and backup gluster servers [%+v,%+v]", glusterServer, bkpservers) - - return glusterServer, bkpservers, err -} - -// DeleteVolume deletes the given volume. 
-func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "volume delete request is nil") - } - - volumeID := req.GetVolumeId() - if volumeID == "" { - return nil, status.Error(codes.InvalidArgument, "volume ID is nil") - } - glog.V(2).Infof("deleting volume with ID: %s", volumeID) - - // Stop volume - err := cs.client.VolumeStop(req.VolumeId) - - if err != nil { - errResp := cs.client.LastErrorResponse() - //errResp will be nil in case of `No route to host` error - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - return &csi.DeleteVolumeResponse{}, nil - } - if err.Error() != gd2Error.ErrVolAlreadyStopped.Error() { - glog.Errorf("failed to stop volume %s: %v", volumeID, err) - return nil, status.Errorf(codes.Internal, "failed to stop volume %s: %v", volumeID, err) - } - } - - // Delete volume - err = cs.client.VolumeDelete(req.VolumeId) - if err != nil { - errResp := cs.client.LastErrorResponse() - //errResp will be nil in case of No route to host error - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - return &csi.DeleteVolumeResponse{}, nil - } - glog.Errorf("deleting volume %s failed: %v", req.VolumeId, err) - return nil, status.Errorf(codes.Internal, "deleting volume %s failed: %v", req.VolumeId, err) - } - - glog.Infof("successfully deleted volume %s", volumeID) - return &csi.DeleteVolumeResponse{}, nil -} - -// ControllerPublishVolume return Unimplemented error -func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// ControllerUnpublishVolume return Unimplemented error -func (cs *ControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// ValidateVolumeCapabilities checks whether the volume capabilities requested -// are supported. 
-func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { - if req == nil { - return nil, status.Errorf(codes.InvalidArgument, "ValidateVolumeCapabilities() - request is nil") - } - - volumeID := req.GetVolumeId() - if volumeID == "" { - return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities() - VolumeId is nil") - } - - reqCaps := req.GetVolumeCapabilities() - if reqCaps == nil { - return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities() - VolumeCapabilities is nil") - } - - _, err := cs.client.Volumes(volumeID) - if err != nil { - return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities() - %v", err) - } - - var vcaps []*csi.VolumeCapability_AccessMode - for _, mode := range []csi.VolumeCapability_AccessMode_Mode{ - csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, - csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY, - csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, - } { - vcaps = append(vcaps, &csi.VolumeCapability_AccessMode{Mode: mode}) - } - capSupport := true - IsSupport := func(mode csi.VolumeCapability_AccessMode_Mode) bool { - for _, m := range vcaps { - if mode == m.Mode { - return true - } - } - return false - } - for _, cap := range reqCaps { - if !IsSupport(cap.AccessMode.Mode) { - capSupport = false - } - } - - resp := &csi.ValidateVolumeCapabilitiesResponse{ - Supported: capSupport, - } - glog.V(1).Infof("GlusterFS CSI driver volume capabilities: %+v", resp) - return resp, nil -} - -// ListVolumes returns a list of volumes -func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { - //Fetch all the volumes in the TSP - volumes, err := cs.client.Volumes("") - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - var entries []*csi.ListVolumesResponse_Entry - for _, vol := range volumes { - entries = append(entries, &csi.ListVolumesResponse_Entry{ - Volume: &csi.Volume{ - Id: vol.Name, - CapacityBytes: (int64(vol.Capacity)) * utils.MB, - }, - }) - } - - resp := &csi.ListVolumesResponse{ - Entries: entries, - } - - return resp, nil -} - -// GetCapacity returns the capacity of the storage pool -func (cs *ControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// ControllerGetCapabilities returns the capabilities of the controller service. 
-func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { - newCap := func(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability { - return &csi.ControllerServiceCapability{ - Type: &csi.ControllerServiceCapability_Rpc{ - Rpc: &csi.ControllerServiceCapability_RPC{ - Type: cap, - }, - }, - } - } - - var caps []*csi.ControllerServiceCapability - for _, cap := range []csi.ControllerServiceCapability_RPC_Type{ - csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, - csi.ControllerServiceCapability_RPC_LIST_VOLUMES, - csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, - csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, - } { - caps = append(caps, newCap(cap)) - } - - resp := &csi.ControllerGetCapabilitiesResponse{ - Capabilities: caps, - } - - return resp, nil -} - -// CreateSnapshot create snapshot of an existing PV -func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { - - if err := cs.validateCreateSnapshotReq(req); err != nil { - return nil, err - } - glog.V(2).Infof("received request to create snapshot %v from volume %v", req.GetName(), req.GetSourceVolumeId()) - - snapInfo, err := cs.GfDriver.client.SnapshotInfo(req.Name) - if err != nil { - glog.Errorf("failed to get snapshot info for %v with Error %v", req.GetName(), err.Error()) - errResp := cs.client.LastErrorResponse() - //errResp will be nil in case of No route to host error - if errResp != nil && errResp.StatusCode != http.StatusNotFound { - - return nil, status.Errorf(codes.Internal, "CreateSnapshot - failed to get snapshot info %s", err.Error()) - } - if errResp == nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - } else { - - if snapInfo.ParentVolName != req.GetSourceVolumeId() { - glog.Errorf("snapshot %v belongs to different volume %v", req.GetName(), snapInfo.ParentVolName) - return nil, status.Errorf(codes.AlreadyExists, "CreateSnapshot - snapshot %s belongs to different volume %s", snapInfo.ParentVolName, req.GetSourceVolumeId()) - } - - return &csi.CreateSnapshotResponse{ - Snapshot: &csi.Snapshot{ - Id: snapInfo.VolInfo.Name, - SourceVolumeId: snapInfo.ParentVolName, - CreatedAt: snapInfo.CreatedAt.Unix(), - SizeBytes: (int64(snapInfo.VolInfo.Capacity)) * utils.MB, - Status: &csi.SnapshotStatus{ - Type: csi.SnapshotStatus_READY, - }, - }, - }, nil - } - - snapReq := api.SnapCreateReq{ - VolName: req.SourceVolumeId, - SnapName: req.Name, - Force: true, - } - glog.V(2).Infof("snapshot request: %+v", snapReq) - snapResp, err := cs.client.SnapshotCreate(snapReq) - if err != nil { - glog.Errorf("failed to create snapshot %v", err) - return nil, status.Errorf(codes.Internal, "CreateSnapshot - snapshot create failed %s", err.Error()) - } - - actReq := api.SnapActivateReq{ - Force: true, - } - err = cs.client.SnapshotActivate(actReq, req.Name) - if err != nil { - glog.Errorf("failed to activate snapshot %v", err) - return nil, status.Errorf(codes.Internal, "failed to activate snapshot %s", err.Error()) - } - return &csi.CreateSnapshotResponse{ - Snapshot: &csi.Snapshot{ - Id: snapResp.VolInfo.Name, - SourceVolumeId: snapResp.ParentVolName, - CreatedAt: snapResp.CreatedAt.Unix(), - SizeBytes: (int64(snapResp.VolInfo.Capacity)) * utils.MB, - Status: &csi.SnapshotStatus{ - Type: csi.SnapshotStatus_READY, - }, - }, - }, nil -} - -func (cs *ControllerServer) 
validateCreateSnapshotReq(req *csi.CreateSnapshotRequest) error { - if req == nil { - return status.Errorf(codes.InvalidArgument, "CreateSnapshot request is nil") - } - if req.GetName() == "" { - return status.Error(codes.InvalidArgument, "CreateSnapshot - name cannot be nil") - } - - if req.GetSourceVolumeId() == "" { - return status.Error(codes.InvalidArgument, "CreateSnapshot - sourceVolumeId is nil") - } - if req.GetName() == req.GetSourceVolumeId() { - //In glusterd2 we cannot create a snapshot as same name as volume name - return status.Error(codes.InvalidArgument, "CreateSnapshot - sourceVolumeId and snapshot name cannot be same") - } - return nil -} - -// DeleteSnapshot delete provided snapshot of a PV -func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { - if req == nil { - return nil, status.Errorf(codes.InvalidArgument, "DeleteSnapshot request is nil") - } - if req.GetSnapshotId() == "" { - return nil, status.Error(codes.InvalidArgument, "DeleteSnapshot - snapshotId is empty") - } - glog.V(4).Infof("deleting snapshot %s", req.GetSnapshotId()) - - err := cs.client.SnapshotDeactivate(req.GetSnapshotId()) - if err != nil { - errResp := cs.client.LastErrorResponse() - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - return &csi.DeleteSnapshotResponse{}, nil - } - - if err.Error() != gd2Error.ErrSnapDeactivated.Error() { - glog.Errorf("failed to deactivate snapshot %v", err) - return nil, status.Errorf(codes.Internal, "DeleteSnapshot - failed to deactivate snapshot %s", err.Error()) - } - - } - err = cs.client.SnapshotDelete(req.SnapshotId) - if err != nil { - errResp := cs.client.LastErrorResponse() - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - return &csi.DeleteSnapshotResponse{}, nil - } - glog.Errorf("failed to delete snapshot %v", err) - return nil, status.Errorf(codes.Internal, "DeleteSnapshot - failed to delete snapshot %s", err.Error()) - } - return &csi.DeleteSnapshotResponse{}, nil -} - -// ListSnapshots list the snapshots of a PV -func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { - var ( - snaplist api.SnapListResp - err error - startToken int32 - ) - if req.GetStartingToken() != "" { - i, parseErr := strconv.ParseUint(req.StartingToken, 10, 32) - if parseErr != nil { - return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error()) - } - startToken = int32(i) - } - - if len(req.GetSnapshotId()) != 0 { - return cs.listSnapshotFromID(req.GetSnapshotId()) - } - - //If volume id is sent - if len(req.GetSourceVolumeId()) != 0 { - snaplist, err = cs.client.SnapshotList(req.SourceVolumeId) - if err != nil { - errResp := cs.client.LastErrorResponse() - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - resp := csi.ListSnapshotsResponse{} - return &resp, nil - } - glog.Errorf("failed to list snapshots %v", err) - return nil, status.Errorf(codes.Internal, "ListSnapshot - failed to get snapshots %s", err.Error()) - } - } else { - //Get all snapshots - snaplist, err = cs.client.SnapshotList("") - if err != nil { - glog.Errorf("failed to list snapshots %v", err) - return nil, status.Errorf(codes.Internal, "failed to get snapshots %s", err.Error()) - } - } - - return cs.doPagination(req, snaplist, startToken) -} - -func (cs *ControllerServer) listSnapshotFromID(snapID string) (*csi.ListSnapshotsResponse, error) { - var entries 
[]*csi.ListSnapshotsResponse_Entry - snap, err := cs.GfDriver.client.SnapshotInfo(snapID) - if err != nil { - errResp := cs.client.LastErrorResponse() - if errResp != nil && errResp.StatusCode == http.StatusNotFound { - resp := csi.ListSnapshotsResponse{} - return &resp, nil - } - glog.Errorf("failed to get snapshot info %v", err) - return nil, status.Errorf(codes.NotFound, "ListSnapshot - failed to get snapshot info %s", err.Error()) - - } - entries = append(entries, &csi.ListSnapshotsResponse_Entry{ - Snapshot: &csi.Snapshot{ - Id: snap.VolInfo.Name, - SourceVolumeId: snap.ParentVolName, - CreatedAt: snap.CreatedAt.Unix(), - SizeBytes: (int64(snap.VolInfo.Capacity)) * utils.MB, - Status: &csi.SnapshotStatus{ - Type: csi.SnapshotStatus_READY, - }, - }, - }) - - resp := csi.ListSnapshotsResponse{} - resp.Entries = entries - return &resp, nil - -} -func (cs *ControllerServer) doPagination(req *csi.ListSnapshotsRequest, snapList api.SnapListResp, startToken int32) (*csi.ListSnapshotsResponse, error) { - if req.GetStartingToken() != "" && int(startToken) > len(snapList) { - return nil, status.Error(codes.Aborted, "invalid starting token") - } - - var entries []*csi.ListSnapshotsResponse_Entry - for _, snap := range snapList { - for _, s := range snap.SnapList { - entries = append(entries, &csi.ListSnapshotsResponse_Entry{ - Snapshot: &csi.Snapshot{ - Id: s.VolInfo.Name, - SourceVolumeId: snap.ParentName, - CreatedAt: s.CreatedAt.Unix(), - SizeBytes: (int64(s.VolInfo.Capacity)) * utils.MB, - Status: &csi.SnapshotStatus{ - Type: csi.SnapshotStatus_READY, - }, - }, - }) - } - - } - - //TODO need to remove paginating code once glusterd2 issue - //https://github.com/gluster/glusterd2/issues/372 is merged - var ( - maximumEntries = req.MaxEntries - nextToken int32 - remainingEntries = int32(len(snapList)) - startToken - resp csi.ListSnapshotsResponse - ) - - if maximumEntries == 0 || maximumEntries > remainingEntries { - maximumEntries = remainingEntries - } - - resp.Entries = entries[startToken : startToken+maximumEntries] - - if nextToken = startToken + maximumEntries; nextToken < int32(len(snapList)) { - resp.NextToken = fmt.Sprintf("%d", nextToken) - } - return &resp, nil -} diff --git a/pkg/glusterfs/driver.go b/pkg/glusterfs/driver.go index fde21d3e7..405ea0856 100644 --- a/pkg/glusterfs/driver.go +++ b/pkg/glusterfs/driver.go @@ -1,71 +1,42 @@ package glusterfs import ( - "github.com/gluster/gluster-csi-driver/pkg/glusterfs/utils" + "os" - "github.com/gluster/glusterd2/pkg/restclient" - "github.com/golang/glog" - "github.com/kubernetes-csi/drivers/pkg/csi-common" -) + "github.com/gluster/gluster-csi-driver/pkg/command" + "github.com/gluster/gluster-csi-driver/pkg/controller" -const ( - glusterfsCSIDriverName = "org.gluster.glusterfs" - glusterfsCSIDriverVersion = "0.0.9" + "github.com/golang/glog" ) -// GfDriver is the struct embedding information about the connection to gluster cluster and configuration of CSI driver. 
-type GfDriver struct { - client *restclient.Client - *utils.Config +// Driver implements command.Driver +type Driver struct { + *command.Config } -// New returns CSI driver -func New(config *utils.Config) *GfDriver { - gfd := &GfDriver{} - - if config == nil { - glog.Errorf("GlusterFS CSI driver initialization failed") - return nil - } +// New returns a new Driver +func New(config *command.Config) *Driver { + gd := &Driver{} - gfd.Config = config - var err error - gfd.client, err = restclient.New(config.RestURL, config.RestUser, config.RestSecret, "", false) - - if err != nil { - glog.Errorf("error creating glusterd2 REST client: %s", err.Error()) + if config != nil { + gd.Config = config + } else { + glog.Error("failed to initialize GlusterD2 driver: config is nil") return nil } - glog.V(1).Infof("GlusterFS CSI driver initialized") + glog.V(1).Infof("%s initialized", gd.Desc) - return gfd + return gd } -// NewControllerServer initialize a controller server for GlusterFS CSI driver. -func NewControllerServer(g *GfDriver) *ControllerServer { - return &ControllerServer{ - GfDriver: g, - } -} - -// NewNodeServer initialize a node server for GlusterFS CSI driver. -func NewNodeServer(g *GfDriver) *NodeServer { - return &NodeServer{ - GfDriver: g, - } -} - -// NewIdentityServer initialize an identity server for GlusterFS CSI driver. -func NewIdentityServer(g *GfDriver) *IdentityServer { - return &IdentityServer{ - GfDriver: g, +// Run runs the driver +func (d *Driver) Run() { + client, err := NewClient(d.Config) + if err != nil { + glog.Errorf("failed to get gd2Client: %v", err) + os.Exit(1) } -} -// Run start a non-blocking grpc controller,node and identityserver for GlusterFS CSI driver which can serve multiple parallel requests -func (g *GfDriver) Run() { - srv := csicommon.NewNonBlockingGRPCServer() - srv.Start(g.Endpoint, NewIdentityServer(g), NewControllerServer(g), NewNodeServer(g)) - srv.Wait() + controller.Run(d.Config, client) } diff --git a/pkg/glusterfs/driver_test.go b/pkg/glusterfs/driver_test.go deleted file mode 100644 index cc689a5c0..000000000 --- a/pkg/glusterfs/driver_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package glusterfs - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/gluster/gluster-csi-driver/pkg/glusterfs/utils" - - "github.com/gluster/glusterd2/pkg/api" - "github.com/gluster/glusterd2/pkg/restclient" - "github.com/kubernetes-csi/csi-test/pkg/sanity" - "github.com/pborman/uuid" - "k8s.io/kubernetes/pkg/util/mount" -) - -type volume struct { - Size uint64 - snapList []string -} - -var volumeCache = make(map[string]volume) - -func TestDriverSuite(t *testing.T) { - glusterMounter = &mount.FakeMounter{} - socket := "/tmp/csi.sock" - endpoint := "unix://" + socket - - //cleanup socket file if already present - os.Remove(socket) - - _, err := os.Create(socket) - if err != nil { - t.Fatal("Failed to create a socket file") - } - defer os.Remove(socket) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case "GET": - handleGETRequest(w, r, t) - - case "DELETE": - handleDeleteRequest(w, r, t) - - case "POST": - handlePOSTRequest(w, r, t) - } - })) - - defer ts.Close() - - url, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } - doClient, err := restclient.New(url.String(), "", "", "", false) - if err != nil { - t.Fatal(err) - } - - d := GfDriver{ - - client: doClient, - } - d.Config = new(utils.Config) - d.Endpoint = 
endpoint - d.NodeID = "testing" - go d.Run() - - mntStageDir := "/tmp/mntStageDir" - mntDir := "/tmp/mntDir" - defer os.RemoveAll(mntStageDir) - defer os.RemoveAll(mntDir) - - cfg := &sanity.Config{ - StagingPath: mntStageDir, - TargetPath: mntDir, - Address: endpoint, - } - - sanity.Test(t, cfg) -} - -func handleGETRequest(w http.ResponseWriter, r *http.Request, t *testing.T) { - id := uuid.Parse("02dfdd19-e01e-46ec-a887-97b309a7dd2f") - if strings.Contains(r.URL.String(), "/v1/peers") { - resp := make(api.PeerListResp, 1) - resp[0] = api.PeerGetResp{ - Name: "node1.gluster.org", - PeerAddresses: []string{ - "127.0.0.1:24008"}, - ClientAddresses: []string{ - "127.0.0.1:24007", - "127.0.0.1:24007"}, - Online: true, - PID: 24935, - Metadata: map[string]string{ - "_zone": "02dfdd19-e01e-46ec-a887-97b309a7dd2f", - }, - } - resp = append(resp, api.PeerGetResp{ - Name: "node2.com", - PeerAddresses: []string{ - "127.0.0.1:24008"}, - ClientAddresses: []string{ - "127.0.0.1:24007"}, - Online: true, - PID: 24935, - Metadata: map[string]string{ - "_zone": "02dfdd19-e01e-46ec-a887-97b309a7dd2f", - }, - }) - writeResp(w, http.StatusOK, resp, t) - return - } - - if r.URL.String() == "/v1/volumes" { - resp := make(api.VolumeListResp, 1) - resp[0] = api.VolumeGetResp{ - ID: id, - Name: "test1", - Metadata: map[string]string{volumeOwnerAnn: glusterfsCSIDriverName}, - State: api.VolStarted, - Capacity: 1000, - } - writeResp(w, http.StatusOK, resp, t) - volumeCache["test1"] = volume{Size: 1000} - return - } - - if strings.HasPrefix(r.URL.String(), "/v1/snapshots") { - getSnapShots(w, r, t) - return - } - - vol := strings.Split(strings.Trim(r.URL.String(), "/"), "/") - if checkVolume(vol[2]) { - resp := api.VolumeGetResp{ - ID: id, - Name: vol[2], - Metadata: map[string]string{volumeOwnerAnn: glusterfsCSIDriverName}, - State: api.VolStarted, - Capacity: volumeCache[vol[2]].Size, - } - writeResp(w, http.StatusOK, resp, t) - return - } - resp := api.ErrorResp{} - resp.Errors = append(resp.Errors, api.HTTPError{ - Code: 1, - }) - writeResp(w, http.StatusNotFound, resp, t) -} - -func getSnapShots(w http.ResponseWriter, r *http.Request, t *testing.T) { - if strings.Contains(r.URL.String(), "/v1/snapshots/") { - vol := strings.Split(strings.Trim(r.URL.String(), "/"), "/") - if getVolumeNameFromSnap(vol[2]) != "" { - var res api.SnapInfo - res.VolInfo.Name = vol[2] - res.CreatedAt = time.Now() - res.ParentVolName = getVolumeNameFromSnap(vol[2]) - writeResp(w, http.StatusOK, res, t) - return - } - resp := api.ErrorResp{} - resp.Errors = append(resp.Errors, api.HTTPError{ - Code: 1, - Message: "failed to get snapshot", - Fields: map[string]string{ - "failed": "failed", - }, - }) - writeResp(w, http.StatusNotFound, resp, t) - return - } - - if v, ok := r.URL.Query()["volume"]; ok { - if getSnapNameFromVol(v[0]) == "" { - writeResp(w, http.StatusOK, api.SnapListResp{}, t) - return - } - res := make(api.SnapListResp, 0) - snapList := api.SnapList{} - for _, snap := range volumeCache[v[0]].snapList { - listresp := api.SnapInfo{} - listresp.VolInfo.Name = snap - listresp.ParentVolName = v[0] - listresp.CreatedAt = time.Now() - snapList.ParentName = v[0] - snapList.SnapList = append(snapList.SnapList, listresp) - } - res = append(res, snapList) - writeResp(w, http.StatusOK, res, t) - return - } - - if isSnapsPresent() { - res := make(api.SnapListResp, 0) - for vol, snap := range volumeCache { - snapList := api.SnapList{} - for _, s := range snap.snapList { - listresp := api.SnapInfo{} - listresp.VolInfo.Name = s - 
listresp.ParentVolName = vol - listresp.CreatedAt = time.Now() - snapList.ParentName = vol - snapList.SnapList = append(snapList.SnapList, listresp) - } - if snapList.ParentName != "" { - res = append(res, snapList) - } - } - writeResp(w, http.StatusOK, res, t) - return - } - res := make(api.SnapListResp, 1) - listresp := api.SnapInfo{} - listresp.VolInfo.Name = "snaptest1" - listresp.ParentVolName = "voleTest" - listresp.CreatedAt = time.Now() - res[0].ParentName = "volTest" - res[0].SnapList = append(res[0].SnapList, listresp) - volumeCache["volTest"] = volume{ - snapList: []string{"snaptest1"}, - } - writeResp(w, http.StatusOK, res, t) - -} -func handlePOSTRequest(w http.ResponseWriter, r *http.Request, t *testing.T) { - if strings.HasSuffix(r.URL.String(), "start") || strings.HasSuffix(r.URL.String(), "stop") { - w.WriteHeader(http.StatusOK) - return - } - if strings.HasSuffix(r.URL.String(), "activate") || strings.HasSuffix(r.URL.String(), "deactivate") { - w.WriteHeader(http.StatusOK) - return - } - if strings.HasPrefix(r.URL.String(), "/v1/snapshots") { - var resp api.SnapCreateResp - - var req api.SnapCreateReq - defer r.Body.Close() - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - resp.VolInfo.Name = req.SnapName - resp.ParentVolName = req.VolName - resp.CreatedAt = time.Now() - volResp := volumeCache[req.VolName] - volResp.snapList = append(volResp.snapList, req.SnapName) - volumeCache[req.VolName] = volResp - writeResp(w, http.StatusCreated, resp, t) - } - if strings.HasPrefix(r.URL.String(), "/v1/volumes") { - var resp api.VolumeCreateResp - - var req api.VolCreateReq - defer r.Body.Close() - json.NewDecoder(r.Body).Decode(&req) - resp.Name = req.Name - volumeCache[req.Name] = volume{Size: req.Size} - writeResp(w, http.StatusCreated, resp, t) - } -} -func handleDeleteRequest(w http.ResponseWriter, r *http.Request, t *testing.T) { - if strings.HasPrefix(r.URL.String(), "/v1/snapshots") { - key := strings.Split(strings.Trim(r.URL.String(), "/"), "/") - deleteSnap(key[2]) - } - w.WriteHeader(http.StatusNoContent) -} -func checkVolume(vol string) bool { - _, ok := volumeCache[vol] - return ok -} - -func isSnapsPresent() bool { - found := false - for _, value := range volumeCache { - if len(value.snapList) > 0 { - found = true - } - } - return found -} -func deleteSnap(snapname string) { - for key, value := range volumeCache { - for i, s := range value.snapList { - if s == snapname { - resp := volumeCache[key] - resp.snapList = append(resp.snapList[:i], resp.snapList[i+1:]...) 
- volumeCache[key] = resp - break - } - } - } -} -func getVolumeNameFromSnap(snap string) string { - for key, value := range volumeCache { - for _, s := range value.snapList { - if snap == s { - return key - } - } - } - return "" -} - -func getSnapNameFromVol(vol string) string { - if len(volumeCache[vol].snapList) > 0 { - return volumeCache[vol].snapList[0] - } - return "" -} - -func writeResp(w http.ResponseWriter, status int, resp interface{}, t *testing.T) { - w.WriteHeader(status) - err := json.NewEncoder(w).Encode(&resp) - if err != nil { - t.Fatal("Failed to write response ", err) - } -} diff --git a/pkg/glusterfs/identityserver.go b/pkg/glusterfs/identityserver.go deleted file mode 100644 index a9b9ff878..000000000 --- a/pkg/glusterfs/identityserver.go +++ /dev/null @@ -1,45 +0,0 @@ -package glusterfs - -import ( - "context" - - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/golang/glog" -) - -// IdentityServer struct of Glusterfs CSI driver with supported methods of CSI identity server spec. -type IdentityServer struct { - *GfDriver -} - -// GetPluginInfo returns metadata of the plugin -func (is *IdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { - resp := &csi.GetPluginInfoResponse{ - Name: glusterfsCSIDriverName, - VendorVersion: glusterfsCSIDriverVersion, - } - glog.V(1).Infof("plugininfo response: %+v", resp) - return resp, nil -} - -// GetPluginCapabilities returns available capabilities of the plugin -func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { - resp := &csi.GetPluginCapabilitiesResponse{ - Capabilities: []*csi.PluginCapability{ - { - Type: &csi.PluginCapability_Service_{ - Service: &csi.PluginCapability_Service{ - Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, - }, - }, - }, - }, - } - glog.V(1).Infof("plugin capability response: %+v", resp) - return resp, nil -} - -// Probe returns the health and readiness of the plugin -func (is *IdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) { - return &csi.ProbeResponse{}, nil -} diff --git a/pkg/glusterfs/nodeserver.go b/pkg/glusterfs/nodeserver.go deleted file mode 100644 index 1ef9f5194..000000000 --- a/pkg/glusterfs/nodeserver.go +++ /dev/null @@ -1,170 +0,0 @@ -package glusterfs - -import ( - "context" - "fmt" - "os" - "strings" - - csi "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/golang/glog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/volume/util" -) - -// NodeServer struct of Glusterfs CSI driver with supported methods of CSI node server spec. -type NodeServer struct { - *GfDriver -} - -var glusterMounter = mount.New("") - -// NodeStageVolume mounts the volume to a staging path on the node. 
-func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// NodeUnstageVolume unstages the volume from the staging path -func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// NodePublishVolume mounts the volume mounted to the staging path to the target path -func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { - glog.V(2).Infof("received node publish volume request %+v", req) - - if err := ns.validateNodePublishVolumeReq(req); err != nil { - return nil, err - } - - targetPath := req.GetTargetPath() - - notMnt, err := glusterMounter.IsLikelyNotMountPoint(targetPath) - - if err != nil { - if os.IsNotExist(err) { - if err = os.MkdirAll(targetPath, 0750); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - notMnt = true - } else { - return nil, status.Error(codes.Internal, err.Error()) - } - } - - if !notMnt { - return &csi.NodePublishVolumeResponse{}, nil - } - - mo := req.GetVolumeCapability().GetMount().GetMountFlags() - - if req.GetReadonly() { - mo = append(mo, "ro") - } - gs := req.GetVolumeAttributes()["glusterserver"] - - ep := req.GetVolumeAttributes()["glustervol"] - source := fmt.Sprintf("%s:%s", gs, ep) - - err = glusterMounter.Mount(source, targetPath, "glusterfs", mo) - if err != nil { - if os.IsPermission(err) { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - if strings.Contains(err.Error(), "invalid argument") { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - return nil, status.Error(codes.Internal, err.Error()) - } - - return &csi.NodePublishVolumeResponse{}, nil -} - -func (ns *NodeServer) validateNodePublishVolumeReq(req *csi.NodePublishVolumeRequest) error { - if req == nil { - return status.Error(codes.InvalidArgument, "request cannot be empty") - } - - if req.GetVolumeId() == "" { - return status.Error(codes.InvalidArgument, "NodePublishVolume Volume ID must be provided") - } - - if req.GetTargetPath() == "" { - return status.Error(codes.InvalidArgument, "NodePublishVolume Target Path cannot be empty") - } - - if req.GetVolumeCapability() == nil { - return status.Error(codes.InvalidArgument, "NodePublishVolume Volume Capability must be provided") - } - return nil -} - -// NodeUnpublishVolume unmounts the volume from the target path -func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { - if req == nil { - return nil, status.Errorf(codes.InvalidArgument, "request cannot be empty") - } - - if req.VolumeId == "" { - return nil, status.Error(codes.InvalidArgument, "NodeUnpublishVolume Volume ID must be provided") - } - - if req.TargetPath == "" { - return nil, status.Error(codes.InvalidArgument, "NodeUnpublishVolume Target Path must be provided") - } - - targetPath := req.GetTargetPath() - notMnt, err := glusterMounter.IsLikelyNotMountPoint(targetPath) - if err != nil { - if os.IsNotExist(err) { - return nil, status.Error(codes.NotFound, "targetpath not found") - } - return nil, status.Error(codes.Internal, err.Error()) - - } - - if notMnt { - return nil, status.Error(codes.NotFound, "volume not mounted") - } - - err = util.UnmountPath(req.GetTargetPath(), 
glusterMounter) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return &csi.NodeUnpublishVolumeResponse{}, nil -} - -// NodeGetId returns NodeGetIdResponse for CO. -func (ns *NodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest) (*csi.NodeGetIdResponse, error) { - return &csi.NodeGetIdResponse{ - NodeId: ns.GfDriver.NodeID, - }, nil -} - -// NodeGetInfo info -func (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - return &csi.NodeGetInfoResponse{ - NodeId: ns.GfDriver.NodeID, - }, nil -} - -// NodeGetCapabilities returns the supported capabilities of the node server -func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { - // currently there is a single NodeServer capability according to the spec - nscap := &csi.NodeServiceCapability{ - Type: &csi.NodeServiceCapability_Rpc{ - Rpc: &csi.NodeServiceCapability_RPC{ - Type: csi.NodeServiceCapability_RPC_UNKNOWN, - }, - }, - } - glog.V(1).Infof("node capabiilities: %+v", nscap) - return &csi.NodeGetCapabilitiesResponse{ - Capabilities: []*csi.NodeServiceCapability{ - nscap, - }, - }, nil -} diff --git a/pkg/glusterfs/utils/config.go b/pkg/glusterfs/utils/config.go deleted file mode 100644 index aca778f72..000000000 --- a/pkg/glusterfs/utils/config.go +++ /dev/null @@ -1,36 +0,0 @@ -package utils - -// Common allocation units -const ( - KB int64 = 1000 - MB int64 = 1000 * KB - GB int64 = 1000 * MB - TB int64 = 1000 * GB -) - -// RoundUpSize calculates how many allocation units are needed to accommodate -// a volume of given size. -// RoundUpSize(1500 * 1000*1000, 1000*1000*1000) returns '2' -// (2 GB is the smallest allocatable volume that can hold 1500MiB) -func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { - return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes -} - -// RoundUpToGB rounds up given quantity upto chunks of GB -func RoundUpToGB(sizeBytes int64) int64 { - return RoundUpSize(sizeBytes, GB) -} - -// Config struct fills the parameters of request or user input -type Config struct { - Endpoint string // CSI endpoint - NodeID string // CSI node ID - RestURL string // GD2 endpoint - RestUser string // GD2 user name who has access to create and delete volume - RestSecret string // GD2 user password -} - -//NewConfig returns config struct to initialize new driver -func NewConfig() *Config { - return &Config{} -} diff --git a/pkg/identity/identityserver.go b/pkg/identity/identityserver.go new file mode 100644 index 000000000..92b79429c --- /dev/null +++ b/pkg/identity/identityserver.go @@ -0,0 +1,51 @@ +package identity + +import ( + "context" + + "github.com/gluster/gluster-csi-driver/pkg/command" + + csi "github.com/container-storage-interface/spec/lib/go/csi/v0" +) + +// Server takes a configuration +type Server struct { + *command.Config +} + +// NewServer instantiates a Server +func NewServer(config *command.Config) *Server { + return &Server{ + Config: config, + } +} + +// GetPluginInfo returns metadata of the plugin +func (is *Server) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + resp := &csi.GetPluginInfoResponse{ + Name: is.Name, + VendorVersion: is.Version, + } + return resp, nil +} + +// GetPluginCapabilities returns available capabilities of the plugin +func (is *Server) GetPluginCapabilities(ctx context.Context, req 
*csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + resp := &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + } + return resp, nil +} + +// Probe returns the health and readiness of the plugin +func (is *Server) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) { + return &csi.ProbeResponse{}, nil +} diff --git a/pkg/node/driver.go b/pkg/node/driver.go new file mode 100644 index 000000000..32c9b9819 --- /dev/null +++ b/pkg/node/driver.go @@ -0,0 +1,33 @@ +package node + +import ( + "github.com/gluster/gluster-csi-driver/pkg/command" + + "github.com/golang/glog" +) + +// Driver implements command.Driver +type Driver struct { + *command.Config +} + +// New returns a new Driver +func New(config *command.Config) *Driver { + nd := &Driver{} + + if config != nil { + nd.Config = config + } else { + glog.Error("failed to initialize GlusterFS node driver: config is nil") + return nil + } + + glog.V(1).Infof("%s initialized", nd.Desc) + + return nd +} + +// Run runs the driver +func (d *Driver) Run() { + Run(d.Config) +} diff --git a/pkg/node/nodeserver.go b/pkg/node/nodeserver.go new file mode 100644 index 000000000..b2ba01115 --- /dev/null +++ b/pkg/node/nodeserver.go @@ -0,0 +1,198 @@ +package node + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/gluster/gluster-csi-driver/pkg/command" + "github.com/gluster/gluster-csi-driver/pkg/identity" + + api "github.com/container-storage-interface/spec/lib/go/csi/v0" + "github.com/golang/glog" + csi "github.com/kubernetes-csi/drivers/pkg/csi-common" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume/util" +) + +// Server struct of Glusterfs CSI driver with supported methods of CSI node server spec. +type Server struct { + *command.Config +} + +// Run starts the node server +func Run(config *command.Config) { + srv := csi.NewNonBlockingGRPCServer() + srv.Start(config.Endpoint, identity.NewServer(config), nil, NewServer(config)) + srv.Wait() +} + +// NewServer instantiates a Server +func NewServer(config *command.Config) *Server { + ns := &Server{ + Config: config, + } + + return ns +} + +var glusterMounter = mount.New("") + +// NodeStageVolume mounts the volume to a staging path on the node. 
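+// This driver performs no separate staging step, so the call simply returns
+// Unimplemented.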
+func (ns *Server) NodeStageVolume(ctx context.Context, req *api.NodeStageVolumeRequest) (*api.NodeStageVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + +// NodeUnstageVolume unstages the volume from the staging path +func (ns *Server) NodeUnstageVolume(ctx context.Context, req *api.NodeUnstageVolumeRequest) (*api.NodeUnstageVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + +// NodePublishVolume mounts the volume mounted to the staging path to the target path +func (ns *Server) NodePublishVolume(ctx context.Context, req *api.NodePublishVolumeRequest) (*api.NodePublishVolumeResponse, error) { + glog.V(2).Infof("request: %+v", req) + + if err := ns.validateNodePublishVolumeReq(req); err != nil { + return nil, err + } + + targetPath := req.GetTargetPath() + + notMnt, err := glusterMounter.IsLikelyNotMountPoint(targetPath) + if err != nil && os.IsNotExist(err) { + err = os.MkdirAll(targetPath, 0750) + notMnt = true + } + if err != nil { + glog.Errorf("error with target path %s: %v", targetPath, err) + return nil, status.Error(codes.Internal, err.Error()) + } + if !notMnt { + return &api.NodePublishVolumeResponse{}, nil + } + + mo := req.GetVolumeCapability().GetMount().GetMountFlags() + + if req.GetReadonly() { + mo = append(mo, "ro") + } + + attrs := req.GetVolumeAttributes() + gs := attrs["glusterserver"] + ep := attrs["glustervol"] + + source := fmt.Sprintf("%s:%s", gs, ep) + + if err = mountGlusterVolume(source, targetPath, mo); err != nil { + return nil, err + } + + return &api.NodePublishVolumeResponse{}, nil +} + +func mountGlusterVolume(source, targetPath string, mountOptions []string) error { + err := glusterMounter.Mount(source, targetPath, "glusterfs", mountOptions) + if err != nil { + glog.Errorf("error mounting volume: %v", err) + code := codes.Internal + if os.IsPermission(err) { + code = codes.PermissionDenied + } + if strings.Contains(err.Error(), "invalid argument") { + code = codes.InvalidArgument + } + err = status.Error(code, err.Error()) + } + + return err +} + +func (ns *Server) validateNodePublishVolumeReq(req *api.NodePublishVolumeRequest) error { + if req == nil { + return status.Error(codes.InvalidArgument, "request cannot be empty") + } + + if req.GetVolumeId() == "" { + return status.Error(codes.InvalidArgument, "NodePublishVolume Volume ID must be provided") + } + + if req.GetTargetPath() == "" { + return status.Error(codes.InvalidArgument, "NodePublishVolume Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return status.Error(codes.InvalidArgument, "NodePublishVolume Volume Capability must be provided") + } + return nil +} + +// NodeUnpublishVolume unmounts the volume from the target path +func (ns *Server) NodeUnpublishVolume(ctx context.Context, req *api.NodeUnpublishVolumeRequest) (*api.NodeUnpublishVolumeResponse, error) { + if req == nil { + return nil, status.Errorf(codes.InvalidArgument, "request cannot be empty") + } + + if req.VolumeId == "" { + return nil, status.Error(codes.InvalidArgument, "NodeUnpublishVolume Volume ID must be provided") + } + + if req.TargetPath == "" { + return nil, status.Error(codes.InvalidArgument, "NodeUnpublishVolume Target Path must be provided") + } + + targetPath := req.GetTargetPath() + notMnt, err := glusterMounter.IsLikelyNotMountPoint(targetPath) + if err != nil { + if os.IsNotExist(err) { + return nil, status.Errorf(codes.NotFound, "target path [%s] not found", targetPath) + } + return nil, status.Error(codes.Internal, 
err.Error())
+	}
+
+	if notMnt {
+		return nil, status.Error(codes.NotFound, "volume not mounted")
+	}
+
+	err = util.UnmountPath(req.GetTargetPath(), glusterMounter)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	return &api.NodeUnpublishVolumeResponse{}, nil
+}
+
+// NodeGetId returns the node ID for the container orchestrator (CO).
+func (ns *Server) NodeGetId(ctx context.Context, req *api.NodeGetIdRequest) (*api.NodeGetIdResponse, error) {
+	return &api.NodeGetIdResponse{
+		NodeId: ns.NodeID,
+	}, nil
+}
+
+// NodeGetInfo returns information about this node, currently just its node ID.
+func (ns *Server) NodeGetInfo(ctx context.Context, req *api.NodeGetInfoRequest) (*api.NodeGetInfoResponse, error) {
+	return &api.NodeGetInfoResponse{
+		NodeId: ns.NodeID,
+	}, nil
+}
+
+// NodeGetCapabilities returns the supported capabilities of the node server
+func (ns *Server) NodeGetCapabilities(ctx context.Context, req *api.NodeGetCapabilitiesRequest) (*api.NodeGetCapabilitiesResponse, error) {
+	// currently there is a single Server capability according to the spec
+	nscap := &api.NodeServiceCapability{
+		Type: &api.NodeServiceCapability_Rpc{
+			Rpc: &api.NodeServiceCapability_RPC{
+				Type: api.NodeServiceCapability_RPC_UNKNOWN,
+			},
+		},
+	}
+	glog.V(1).Infof("node capabilities: %+v", nscap)
+	return &api.NodeGetCapabilitiesResponse{
+		Capabilities: []*api.NodeServiceCapability{
+			nscap,
+		},
+	}, nil
+}
diff --git a/scripts/build-drivers.sh b/scripts/build-drivers.sh
new file mode 100755
index 000000000..e3ae7abc7
--- /dev/null
+++ b/scripts/build-drivers.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -e
+
+DRIVERS=( "${@}" )
+BUILDDIR="${BUILDDIR:-build}"
+
+if [ ${#DRIVERS[@]} -eq 0 ]; then
+	DRIVERS=( $(ls cmd) )
+fi
+
+mkdir -p "${BUILDDIR}"
+
+while [ ${#DRIVERS[@]} -ne 0 ]; do
+	DRIVER="${DRIVERS[0]}"
+	CGO_ENABLED=0 GOOS=linux go build -ldflags '-extldflags "-static"' -o "${BUILDDIR}/${DRIVER}-csi-driver" "cmd/${DRIVER}/main.go"
+	ldd "${BUILDDIR}/${DRIVER}-csi-driver" | grep -q "not a dynamic executable"
+	DRIVERS=( "${DRIVERS[@]:1}" )
+done
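
For reference, one plausible way to exercise the new build script (assuming a
working Go toolchain and the repository root as the current directory; the
driver names are the ones this patch adds under cmd/):

	# build both split drivers into the default ./build directory
	./scripts/build-drivers.sh glusterfs-controller glusterfs-node

	# or build every driver found under cmd/ into a custom output directory
	BUILDDIR=_output ./scripts/build-drivers.sh

The script verifies each resulting binary is fully static (via ldd), which is
what lets the images run without a Go runtime or shared libraries.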