Skip to content

Commit

Permalink
ci: support create cluster and run conformance tests on volcengine
Browse files Browse the repository at this point in the history
- Add scripts to install kubernetes cluster on volcengine.
- Add scripts to run conformance tests.
  • Loading branch information
ruicao93 committed May 24, 2023
1 parent 60e2242 commit a844e18
Show file tree
Hide file tree
Showing 27 changed files with 984 additions and 5 deletions.
10 changes: 10 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,13 @@
*.db

/pkg/utils/logger/log_test.log

# ci files
ci/playbook/kube
ci/output
ci/tf/.terraform
ci/tf/.terraform.lock.hcl
ci/tf/*.tfstate
ci/tf/*.tfstate.backup
ci/tf/hosts.yaml
ci/tf/values.yaml
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ BUILD_INFO=-ldflags="-X main.BuildInfo=$(VERSION)@$(BRANCH)_$(DATE)"
# BUILD FLAGS
CELLO_META ?=

BUILD_ARGS =
BUILD_ARGS = --build-arg HTTPS_PROXY=$(HTTPS_PROXY) --build-arg GOPROXY=$(GOPROXY)
ifdef GOPROXY
BUILD_ARGS+=--build-arg GOPROXY=$(GOPROXY)
endif
Expand Down
28 changes: 28 additions & 0 deletions ci/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@

# Create kubernetes cluster

Edit cluster variables in file `ci/tf/example.tfvars` and run:

``` bash
./provision.sh
```
The kubectl config file is located at `ci/kube/config-public`.

```bash
export KUBECONFIG=ci/kube/config-public
kubectl get nodes -A -o wide
```

# Destroy kubernetes cluster

Uninstall Cello and then run:

``` bash
./destroy.sh
```

# Run conformance tests

``` bash
./conformance_test.sh
```
49 changes: 49 additions & 0 deletions ci/conformance_test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Copyright 2023 The Cello Authors
#
# Run the upstream Kubernetes sig-network conformance suite with sonobuoy and
# collect summaries / per-test logs under ci/output.
#
# Optional environment variables:
#   SONOBUOY_IMAGE      - override the sonobuoy aggregator image
#   CONFORMANCE_IMAGE   - override the kube-conformance image
#   SYSTEMD_LOGS_IMAGE  - override the systemd-logs plugin image
#   E2E_REPO_CONFIG     - custom e2e registry config file

set -x

THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
TEST_OUTPUT="$THIS_DIR/output"
# Fixed typo: SONOBUOY_VRSION -> SONOBUOY_VERSION.
SONOBUOY_VERSION="v0.56.16"
SONOBUOY="go run github.com/vmware-tanzu/sonobuoy@$SONOBUOY_VERSION"

# Focus on sig-network conformance; skip cases that do not apply here
# (slow/serial/disruptive, GCE-specific, feature-gated, known-inapplicable).
DEFAULT_E2E_SIG_NETWORK_CONFORMANCE="\[sig-network\].*Conformance"
DEFAULT_E2E_SIG_NETWORK_SKIP="\[Slow\]|\[Serial\]|\[Disruptive\]|\[GCE\]|\[Feature:.+\]|\[Feature:IPv6DualStack\]|\[Feature:IPv6DualStackAlphaFeature\]|should create pod that uses dns|should provide Internet connection for containers|\
HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol"

extra_args=""
# All -n tests are quoted: an unquoted `[ -n $VAR ]` is always true when VAR
# is empty (it degenerates to `[ -n ]`), which passed empty flags to sonobuoy.
if [ -n "${SONOBUOY_IMAGE:-}" ]; then
  extra_args="$extra_args --sonobuoy-image $SONOBUOY_IMAGE"
fi
if [ -n "${CONFORMANCE_IMAGE:-}" ]; then
  extra_args="$extra_args --kube-conformance-image $CONFORMANCE_IMAGE"
fi
if [ -n "${SYSTEMD_LOGS_IMAGE:-}" ]; then
  extra_args="$extra_args --systemd-logs-image $SYSTEMD_LOGS_IMAGE"
fi
if [ -n "${E2E_REPO_CONFIG:-}" ]; then
  extra_args="$extra_args --e2e-repo-config $E2E_REPO_CONFIG"
fi

pushd "$THIS_DIR" > /dev/null
# $extra_args is intentionally unquoted so each accumulated flag splits into
# its own word.
$SONOBUOY run \
  --wait \
  --e2e-focus "$DEFAULT_E2E_SIG_NETWORK_CONFORMANCE" \
  --e2e-skip "$DEFAULT_E2E_SIG_NETWORK_SKIP" \
  $extra_args

# Fixed: `mkdir -f` is not a valid flag; -p creates the directory if missing.
mkdir -p "$TEST_OUTPUT"
results_path=$($SONOBUOY retrieve "$TEST_OUTPUT")
results=$($SONOBUOY results "$results_path" --plugin e2e)
echo "$results" > "$TEST_OUTPUT/test_summary.log"
# Fixed jq filter below: the closing quote of "failed" was misplaced
# ("failed)'), which made jq error out and left tests.log empty.
# Also dropped the useless `echo "$(...)"` wrappers in favor of direct
# redirection.
$SONOBUOY results "$results_path" --plugin e2e --mode=detailed \
  | jq 'select(.status=="passed" or .status=="failed")' > "$TEST_OUTPUT/tests.log"
$SONOBUOY results "$results_path" --plugin e2e --mode=detailed \
  | jq 'select(.status=="passed")' > "$TEST_OUTPUT/passed_tests.log"
$SONOBUOY results "$results_path" --plugin e2e --mode=detailed \
  | jq 'select(.status=="failed")' > "$TEST_OUTPUT/failed_tests.log"
# Fixed file-name typo: skpped_tests.log -> skipped_tests.log.
$SONOBUOY results "$results_path" --plugin e2e --mode=detailed \
  | jq 'select(.status=="skipped")' > "$TEST_OUTPUT/skipped_tests.log"

if [[ ! $results == *"Failed: 0"* ]]; then
  echo "Test failed!"
  exit 1
fi
echo "Test succeeded!"
exit 0
56 changes: 56 additions & 0 deletions ci/destroy.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# Tear down the CI cluster: detach and delete the secondary network
# interfaces Cello created inside the terraform-managed VPC (terraform does
# not know about them), then destroy the terraform infrastructure itself.
set -e

THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

if ! volcengine-cli -h &> /dev/null; then
  echo "volcengine-cli is not installed, please install it first."
  echo "See: https://github.com/volcengine/volcengine-cli"
  exit 1
fi

# Run "$@" up to $1 times, sleeping 2s between attempts.
# Returns 0 on the first success, 1 when all attempts fail.
# Replaces two duplicated inline retry loops whose counter was broken:
# they incremented the unset variable `counter` instead of `count`, so the
# loop could never terminate on persistent failure.
retry() {
  local max_retries=$1
  shift
  local count=0
  while [ "$count" -lt "$max_retries" ]; do
    if "$@"; then
      return 0
    fi
    count=$((count + 1))
    sleep 2
  done
  return 1
}

pushd "$THIS_DIR/tf" > /dev/null
# Remove secondary interfaces created by Cello.
source example.tfvars
export VOLCENGINE_ACCESS_KEY="$access_key"
export VOLCENGINE_SECRET_KEY="$secret_key"
# Fixed region typo: was "cn-bejing".
export VOLCENGINE_REGION=cn-beijing
vpc_id=$(terraform show -json | jq -r '.values.root_module.resources[] | select(.address=="volcengine_vpc.vpc_cello") | .values.vpc_id')
interfaces=$(volcengine-cli vpc DescribeNetworkInterfaces --Type "secondary" --VpcId "$vpc_id" --TagFilters.1.Key "k8s:cello:created-by" --TagFilters.1.Values.1 "cello")
# Feed the loop via process substitution instead of a pipeline: in the
# original `echo | while`, the loop ran in a subshell, so `exit 1` on retry
# exhaustion did not abort the script.
while read -r interface_id; do
  [ -n "$interface_id" ] || continue
  instance_id=$(volcengine-cli vpc DescribeNetworkInterfaceAttributes --NetworkInterfaceId "$interface_id" | jq -r '.Result.DeviceId')
  if [ -n "$instance_id" ]; then
    retry 10 volcengine-cli vpc DetachNetworkInterface --NetworkInterfaceId "$interface_id" --InstanceId "$instance_id" || exit 1
  fi
  # Retry multiple times since the interface may still be in detaching status.
  retry 10 volcengine-cli vpc DeleteNetworkInterface --NetworkInterfaceId "$interface_id" || exit 1
done < <(echo "$interfaces" | jq -r '.Result.NetworkInterfaceSets[].NetworkInterfaceId')

terraform destroy -auto-approve -var-file=example.tfvars
popd > /dev/null
13 changes: 13 additions & 0 deletions ci/playbook/inventory.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Static Ansible inventory for the CI cluster.
# NOTE(review): the IPs and private-key paths below (/home/rcao/...,
# 180.184.33.145) look like leftovers from a developer machine — confirm
# provisioning regenerates this file (e.g. ci/tf/hosts.yaml is gitignored)
# before it is used in CI.
control-plane:
  hosts:
    172.16.1.128:
workers:
  hosts:
    172.16.1.127:
all:
  vars:
    # Private nodes are reached through a jump host (ProxyCommand via the
    # public IP); host-key checking is disabled for ephemeral CI machines.
    ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand="ssh -W %h:%p -q -i /home/rcao/test/cello-tf/cello-ecs.pem [email protected]"
    ansible_ssh_private_key_file: /home/rcao/test/cello-tf/cello-ecs.pem
    host_key_checking: false
    ansible_user: root
    # Public address used both as the SSH jump host and the API endpoint.
    k8s_api_server_ip: 180.184.33.145
9 changes: 9 additions & 0 deletions ci/playbook/k8s.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
# Playbook entry point: bootstrap the control plane first, then the workers
# (each role runs the shared "common" prerequisites from its own task list).
- hosts: control-plane
  gather_facts: no
  roles:
    - control-plane
- hosts: workers
  gather_facts: no
  roles:
    - worker
10 changes: 10 additions & 0 deletions ci/playbook/push_image.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Distribute a locally built Cello image tarball to every node and import it
# into containerd's k8s.io namespace, so kubelet can use it without a registry.
# Expects /tmp/cello.tar to exist on the controller before this playbook runs.
- hosts: all
  gather_facts: no
  tasks:
    - name: Copy image to worker
      copy:
        src: /tmp/cello.tar
        dest: /tmp/cello.tar
    - name: Load image
      command: ctr -n k8s.io images import /tmp/cello.tar
4 changes: 4 additions & 0 deletions ci/playbook/roles/common/handlers/main.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Handler: make sure the kubelet service is running; notified after the
# Kubernetes packages are installed (see roles/common/tasks/kube.yaml).
- name: kubelet status
  service:
    name: kubelet
    state: started
52 changes: 52 additions & 0 deletions ci/playbook/roles/common/tasks/containerd.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Install and configure containerd as the container runtime, including the
# kernel prerequisites Kubernetes networking needs.
- name: Load br_netfilter kernel module
  modprobe:
    name: br_netfilter
    state: present

# Required so bridged pod traffic traverses iptables and the node forwards IP.
- name: Configure sysctl
  sysctl:
    name: "{{ item.name }}"
    value: "{{ item.value }}"
    state: present
    sysctl_file: /etc/sysctl.d/99-kubernetes-cri.conf
    reload: yes
  with_items:
    - { name: net.bridge.bridge-nf-call-iptables, value: 1 }
    - { name: net.ipv4.ip_forward, value: 1 }
    - { name: net.bridge.bridge-nf-call-ip6tables, value: 1 }

# Gathers the `services` fact used by the `when` guard below.
- name: Populate service facts
  service_facts:

# https://developer.volcengine.com/articles/7132008672707739662
# Adds the volcengine-mirrored Docker apt repository; skipped when a
# containerd service already exists on the host.
- name: Setup docker gpg
  shell: |
    mkdir -p /etc/apt/keyrings
    curl -fsSL http://mirrors.ivolces.com/docker/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] http://mirrors.ivolces.com/docker/linux/debian $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
    apt update -y
  when: '"containerd" not in services'

- name: Install containerd
  apt:
    name:
      - containerd.io
    state: present
  register: containerd

- name: Wait containerd started
  service:
    name: containerd
    state: started

- name: Create containerd daemon configuration from template
  template:
    src: templates/containerd.conf.j2
    dest: /etc/containerd/config.toml
  register: containerd_config

# Restart only when the rendered configuration actually changed.
- name: Restart containerd
  service:
    name: containerd
    state: restarted
  when: containerd_config.changed
40 changes: 40 additions & 0 deletions ci/playbook/roles/common/tasks/kube.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# https://developer.volcengine.com/articles/7132385421970915358
# Install pinned kubelet/kubeadm/kubectl 1.24.10 from the volcengine apt
# mirror and point kubelet at the node's inventory address.
- name: Add an apt signing key for Kubernetes
  apt_key:
    url: http://mirrors.ivolces.com/kubernetes/apt/doc/apt-key.gpg
    state: present

- name: Adding apt repository for Kubernetes
  apt_repository:
    repo: deb http://mirrors.ivolces.com/kubernetes/apt/ kubernetes-xenial main
    state: present
    filename: /etc/apt/sources.list.d/kubernetes.list

# Versions are pinned so CI clusters are reproducible; the handler ensures
# kubelet is running after install.
- name: Install Kubernetes binaries
  apt:
    name: "{{ packages }}"
    state: present
    update_cache: yes
    force_apt_get: yes
  vars:
    packages:
      - kubelet=1.24.10-00
      - kubeadm=1.24.10-00
      - kubectl=1.24.10-00
  notify:
    - kubelet status

# inventory_hostname is the node's private IP in this inventory, so kubelet
# advertises the correct node address.
- name: Configure node ip
  lineinfile:
    path: /etc/default/kubelet
    line: KUBELET_EXTRA_ARGS=--node-ip={{ inventory_hostname }}
    state: present
    create: yes
  register: kubelet_config

- name: Restart kubelet
  service:
    name: kubelet
    daemon_reload: yes
    state: restarted
  when: kubelet_config.changed
9 changes: 9 additions & 0 deletions ci/playbook/roles/common/tasks/main.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Common role entry point: wait until SSH is reachable, then install the
# container runtime and the Kubernetes packages.
- name: Wait ssh port ready
  # NOTE(review): this polls port 22 on k8s_api_server_ip (the public jump
  # host address), not on each inventory host — confirm that is intentional,
  # since private nodes are only reachable through that jump host anyway.
  wait_for:
    host: "{{ k8s_api_server_ip }}"
    port: 22
    timeout: 300
    state: started
  connection: local
- import_tasks: containerd.yaml
- import_tasks: kube.yaml
Loading

0 comments on commit a844e18

Please sign in to comment.