- Step 1. Set Up the VirtualBox Environment
- Step 2. Create Master01 VM and Install Ubuntu 24.04 LTS
- 2.1 Update and upgrade packages, then clone master1 to worker1
- 2.2 Configure Network Adapter on master1 (master1 only)
- 2.3 Configure Network Adapter on worker1 (worker1 only)
- 2.4 Install required programs (master1 and worker1)
- Step 3. Clone Master01 to Worker Nodes 01-03
- Step 4. Initialize the Control-Plane Node
- Step 5. Join the Kubernetes Cluster
- Step 6. Install Helm
- Step 7. Install Kubernetes Dashboard
- Step 8. Install NGINX Gateway Fabric
- Step 9. Deploy Example Sites
- 9.1 cafe-example (http)
- 9.2 https-termination (https)
- Step 10. Horizontal Pod Autoscaler
Server Role | Host Name | Configuration | Network Adapter | IP Address |
---|---|---|---|---|
Master Node | Master01 | 4GB RAM, 4 vCPUs, 20GB | Bridged Adapter | 192.168.x.yy |
 | | | Internal Network | 10.0.2.4 |
Worker Node | Worker01 | 2GB RAM, 2 vCPUs, 20GB | Internal Network | 10.0.2.5 |
Worker Node | Worker02 | 2GB RAM, 2 vCPUs, 20GB | Internal Network | 10.0.2.6 |
Worker Node | Worker03 | 2GB RAM, 2 vCPUs, 20GB | Internal Network | 10.0.2.7 |
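If you prefer to script the VM creation instead of clicking through the GUI, roughly equivalent VBoxManage commands are sketched below. The bridged interface name en0, the disk path, and the --ostype value are assumptions to match the table above (on an Arm host the OS type ID differs).

# Create and register the master VM, then size it per the table above
VBoxManage createvm --name Master01 --ostype Ubuntu_64 --register
VBoxManage modifyvm Master01 --memory 4096 --cpus 4
# Adapter 1: bridged to the host's Internet-facing interface
VBoxManage modifyvm Master01 --nic1 bridged --bridgeadapter1 en0
# Adapter 2: the internal network shared by the cluster
VBoxManage modifyvm Master01 --nic2 intnet --intnet2 WUNCANet
# Create a 20GB disk and attach it to a SATA controller
VBoxManage createmedium disk --filename Master01.vdi --size 20480
VBoxManage storagectl Master01 --name SATA --add sata --controller IntelAhci
VBoxManage storageattach Master01 --storagectl SATA --port 0 --device 0 --type hdd --medium Master01.vdi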
Software Name | Version | Reference |
---|---|---|
VirtualBox | 7.1.6 | https://virtualbox.org |
VirtualBox Extension Pack | 7.1.6 | https://virtualbox.org |
VBoxGuestAdditions | 7.1.6 | https://virtualbox.org |
Ubuntu Server | 24.04 LTS | https://ubuntu.com |
containerd.io | 1.7.24 | https://github.com/containerd/containerd |
crictl | 1.32.0 | https://github.com/kubernetes-sigs/cri-tools |
Kubernetes | 1.32.0 | https://kubernetes.io/releases/download/ |
Calico | 3.29.1 | https://github.com/projectcalico/calico |
Helm | 3.16.3 | https://helm.sh/docs/intro/install/ |
NGINX Gateway Fabric | 1.6.0 | https://github.com/nginx/nginx-gateway-fabric |
Metrics Server | 0.7.2 | https://github.com/kubernetes-sigs/metrics-server |
- Install Oracle VirtualBox Extension Pack
  - Oracle_VirtualBox_Extension_Pack-7.1.6-165100.vbox-extpack
- Add optical disk
  - ubuntu-24.04.1-live-server-arm64.iso
sudo apt update && \
sudo apt upgrade -y && \
sudo apt install net-tools network-manager ssh iproute2 iptables inetutils-ping -y && \
sudo systemctl enable ssh
sudo init 0
sudo apt install -y build-essential dkms linux-headers-$(uname -r)
# Insert the VBoxGuestAdditions.iso into the Linux guest's CD-ROM drive and mount it.
sudo mount /dev/cdrom /media
cd /media
sudo ./VBoxLinuxAdditions.run
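A quick way to confirm the Guest Additions actually loaded after the next reboot:

# The vboxguest kernel module should be listed once Guest Additions are installed
lsmod | grep vboxguest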
- Clone master1 to worker1
- Start master1 and worker1
- Go to VM Settings
- Select Network menu
  - Adapter 1
    - Attached to: Bridged Adapter
    - Name: en0: WiFi (the interface connected to the Internet)
  - Adapter 2
    - Enable Network Adapter
    - Attached to: Internal Network
    - Name: WUNCANet (a new Internal Network name, used for communication inside the cluster)
- Click the OK button
- Start the VM
# Check the interfaces available in the VM
ip addr
ifconfig
# Bring up any interface that does not yet appear in the ifconfig output
sudo ifconfig enp0s9 up
# Edit the settings of both network interfaces
sudo vi /etc/netplan/50-cloud-init.yaml
network:
  ethernets:
    enp0s8: # interface attached to the Bridged Adapter
      addresses: [192.168.x.yy/24] # an IP on the same network as the host machine
      routes:
        - to: default
          via: 192.168.x.254
      nameservers:
        search: [local]
        addresses: [192.168.2.153] # IP of the DNS server
      dhcp4: false
    enp0s9: # interface attached to the Internal Network
      addresses: [10.0.2.4/24]
      nameservers:
        search: [local]
        addresses: [8.8.8.8, 8.8.4.4]
      dhcp4: false
  version: 2
sudo netplan apply
ifconfig enp0s8
# For macOS ==================
netstat -rn -f inet
sudo route delete 10.0.2.0/24 # delete the route first if it already exists
sudo route add -net 10.0.2.0/24 192.168.x.yy # IP of the Bridged Adapter
netstat -rn -f inet
# For Windows =================
route print
route delete 10.0.2.0 # delete the route first if it already exists
route add 10.0.2.0 MASK 255.255.255.0 192.168.x.yy # IP of the Bridged Adapter
route print
# Enable IP forwarding on the master1 VM
echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward && \
sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf" && \
sudo sysctl -p
# Masquerade every packet coming from the 10.0.2.0/24 network as it leaves master1's bridged interface (enp0s8)
sudo iptables -t nat -L -nv
sudo iptables -t nat -A POSTROUTING -o enp0s8 -s 10.0.2.0/24 -j MASQUERADE
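Note that while the sysctl change above persists, the iptables rule does not survive a reboot. One way to persist it is the iptables-persistent package (a sketch, assuming Ubuntu's netfilter-persistent service):

# Save the current iptables rules so they are restored at boot
sudo apt install -y iptables-persistent && \
sudo netfilter-persistent save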
- Go to VM Settings
- Select Network menu
  - Adapter 1
    - Attached to: Internal Network
    - Name: WUNCANet (the same name as the Internal Network of VM master1)
- Click the OK button
- Start the VM
# Rename the VM to worker1
sudo hostnamectl set-hostname worker1
# Edit the hostname file so it reads worker1
sudo vi /etc/hostname
worker1
# Edit the network interface settings of the worker1 VM
sudo vi /etc/netplan/50-cloud-init.yaml
network:
  ethernets:
    enp0s8: # interface attached to the Internal Network
      addresses: [10.0.2.5/24]
      routes:
        - to: default
          via: 10.0.2.4
          metric: 100
      nameservers:
        search: [local]
        addresses: [8.8.8.8, 8.8.4.4]
      dhcp4: false
  version: 2
sudo netplan apply
ifconfig enp0s8
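Before continuing, it is worth confirming that worker1 can reach the Internet through master1; a minimal check using the addresses from this guide:

ping -c 3 10.0.2.4    # the gateway (master1's internal interface)
ping -c 3 8.8.8.8     # Internet reachability via master1's MASQUERADE rule
ping -c 3 google.com  # DNS resolution through the configured nameservers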
# Install the programs required on the master1 and worker1 VMs
sudo apt update && \
sudo apt upgrade -y && \
sudo apt install gcc make perl build-essential bzip2 tar apt-transport-https ca-certificates curl gpg git -y
sudo swapoff -a && \
sudo sed -i '/swap/ s/^/#/' /etc/fstab && \
sudo rm -f /swap.img && \
sudo systemctl disable swap.target
sudo modprobe overlay && \
sudo modprobe br_netfilter
sudo tee /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF
sudo tee /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
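A quick sanity check that the modules loaded and the sysctl values took effect:

# Both modules should be listed, and all three sysctls should print 1
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward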
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install containerd.io -y && \
sudo mkdir -p /etc/containerd && \
sudo containerd config default | sudo tee /etc/containerd/config.toml
sudo vi /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
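If you prefer a non-interactive edit, the same change can be made with sed (assuming the default config generated above, which sets SystemdCgroup = false):

# Flip runc's cgroup driver to systemd without opening an editor
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml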
sudo systemctl restart containerd && \
sudo systemctl enable containerd && \
systemctl status containerd
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.32.0/crictl-v1.32.0-linux-$(dpkg --print-architecture).tar.gz
sudo tar zxvf crictl-v1.32.0-linux-$(dpkg --print-architecture).tar.gz -C /usr/local/bin
rm -f crictl-v1.32.0-linux-$(dpkg --print-architecture).tar.gz
sudo tee /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
sudo systemctl restart containerd && \
sudo systemctl status containerd
sudo crictl info
sudo crictl images
sudo crictl ps
sudo crictl pods
sudo crictl stats
sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.32/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.32/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && \
sudo apt-get install kubelet kubeadm kubectl -y && \
sudo apt-mark hold kubelet kubeadm kubectl && \
sudo systemctl enable --now kubelet
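To confirm the tooling installed and is pinned before shutting down for cloning:

kubeadm version
kubectl version --client
kubelet --version
apt-mark showhold   # should list kubelet, kubeadm, kubectl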
sudo init 0
sudo hostnamectl set-hostname worker2
sudo vi /etc/hostname
worker2
sudo vi /etc/netplan/50-cloud-init.yaml
sudo netplan apply
sudo init 0
sudo hostnamectl set-hostname worker3
sudo vi /etc/hostname
worker3
sudo vi /etc/netplan/50-cloud-init.yaml
sudo netplan apply
sudo init 0
sudo kubeadm init \
--apiserver-advertise-address=10.0.2.4 \
--apiserver-bind-port=6443 \
--control-plane-endpoint=10.0.2.4:6443 \
--pod-network-cidr=192.168.0.0/16 \
--cri-socket=/var/run/containerd/containerd.sock \
--v=5
mkdir -p $HOME/.kube && \
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && \
sudo chown $(id -u):$(id -g) $HOME/.kube/config && \
export KUBECONFIG=/etc/kubernetes/admin.conf && \
sudo chmod -R 755 /etc/kubernetes/admin.conf
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/custom-resources.yaml
# Verify the Calico kubectl plugin works (requires calicoctl to be installed as kubectl-calico).
kubectl calico -h
watch kubectl get pods -n calico-system
# Confirm the API server is listening before joining workers
nc 127.0.0.1 6443 -v
sudo kubeadm join 10.0.2.4:6443 --token xxxxx.yyyyyyyyyyyyyyyy \
--discovery-token-ca-cert-hash sha256:xyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyxyx
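The bootstrap token printed by kubeadm init expires after 24 hours; if it has lapsed, generate a fresh join command on master1:

# Prints a ready-to-run "kubeadm join ..." line with a new token
sudo kubeadm token create --print-join-command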
kubectl get nodes -o wide
kubectl get pods -o wide --all-namespaces
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update && \
sudo apt-get install helm
mkdir namespaces && \
cd namespaces && \
mkdir kubernetes-dashboard && \
cd kubernetes-dashboard
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
kubectl -n kubernetes-dashboard get svc -o wide
tee dashboard-adminuser.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
EOF
kubectl apply -f dashboard-adminuser.yaml
tee cluster-role.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF
kubectl apply -f cluster-role.yaml
kubectl -n kubernetes-dashboard create token admin-user
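The token above is short-lived. If a long-lived token is preferred, the standard approach is a service-account token Secret (a sketch; the Secret name admin-user-token is arbitrary):

tee admin-user-token.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: admin-user-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: admin-user
type: kubernetes.io/service-account-token
EOF
kubectl apply -f admin-user-token.yaml
# Read the token back out of the Secret
kubectl -n kubernetes-dashboard get secret admin-user-token -o jsonpath='{.data.token}' | base64 -d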
kubectl -n kubernetes-dashboard port-forward --address 0.0.0.0 svc/kubernetes-dashboard-kong-proxy 8443:443 > /dev/null &
# Open the dashboard in a browser:
https://10.0.2.4:8443
kubectl kustomize "https://github.com/nginx/nginx-gateway-fabric/config/crd/gateway-api/standard?ref=v1.6.0" | kubectl apply -f -
kubectl apply -f https://raw.githubusercontent.com/nginx/nginx-gateway-fabric/v1.6.0/deploy/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/nginx/nginx-gateway-fabric/v1.6.0/deploy/default/deploy.yaml
kubectl get pods -n nginx-gateway -o wide
kubectl get svc nginx-gateway -n nginx-gateway
kubectl patch svc nginx-gateway -n nginx-gateway -p '{"spec": {"externalIPs": ["10.0.2.4"], "externalTrafficPolicy": "Cluster"}}'
kubectl get svc nginx-gateway -n nginx-gateway -o json
kubectl describe svc nginx-gateway -n nginx-gateway
# gatewayClass detail
kubectl get gatewayclass -A
kubectl describe gatewayclass nginx
# gateway detail
kubectl get gateway -A
kubectl describe gateway gateway
# httproutes detail
kubectl get httproutes -A
kubectl describe httproutes
# service detail
kubectl get svc -A
kubectl get svc -n nginx-gateway -o wide
kubectl get svc -n default -o wide
cd ~/namespaces
git clone -b release-1.6 https://github.com/nginx/nginx-gateway-fabric.git
cd nginx-gateway-fabric
cd examples/cafe-example
kubectl apply -f cafe.yaml
kubectl -n default get pods -o wide
kubectl apply -f gateway.yaml
kubectl apply -f cafe-routes.yaml
kubectl describe gateway gateway
curl --resolve cafe.example.com:80:10.0.2.4 http://cafe.example.com/coffee -v
Server address: 10.12.0.18:80
Server name: coffee-7586895968-r26zn
curl --resolve cafe.example.com:80:10.0.2.4 http://cafe.example.com/tea -v
Server address: 10.12.0.19:80
Server name: tea-7cd44fcb4d-xfw2x
kubectl get pods -n nginx-gateway
kubectl exec -it -n nginx-gateway nginx-gateway-964449b44-c45f4 -c nginx -- nginx -T
cd examples/https-termination
kubectl apply -f cafe.yaml
kubectl apply -f certificate-ns-and-cafe-secret.yaml && \
kubectl apply -f reference-grant.yaml && \
kubectl apply -f gateway.yaml && \
kubectl apply -f cafe-routes.yaml
curl --resolve cafe.example.com:80:10.0.2.4 http://cafe.example.com:80/coffee --include
curl --resolve cafe.example.com:80:10.0.2.4 http://cafe.example.com:80/tea --include
curl --resolve cafe.example.com:443:10.0.2.4 https://cafe.example.com:443/coffee --insecure
curl --resolve cafe.example.com:443:10.0.2.4 https://cafe.example.com:443/tea --insecure
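To inspect the certificate the gateway presents during TLS termination (assumes openssl is available on the host):

# Show the subject and validity period of the served certificate
openssl s_client -connect 10.0.2.4:443 -servername cafe.example.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -dates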
kubectl delete -f reference-grant.yaml
curl --resolve cafe.example.com:443:10.0.2.4 https://cafe.example.com:443/coffee --insecure -vvv
kubectl describe gateway gateway
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.7.2/components.yaml
kubectl patch deployment metrics-server -n kube-system \
--type='json' \
-p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--kubelet-insecure-tls"}]'
# Optional: change the metric resolution (this appends a new arg, hence the "add" op):
# kubectl patch deployment metrics-server -n kube-system \
#   --type='json' \
#   -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--metric-resolution=90s"}]'
kubectl get apiservices
kubectl top nodes
kubectl top pods
kubectl top pod <pod-name>
cd ~/namespaces
mkdir hpa
cd hpa
# ------------------------------------
tee cafe.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coffee
spec:
  replicas: 1
  selector:
    matchLabels:
      app: coffee
  template:
    metadata:
      labels:
        app: coffee
    spec:
      containers:
      - name: coffee
        image: nginxdemos/nginx-hello:plain-text
        ports:
        - containerPort: 8080
        resources:
          limits:
            memory: "200Mi"
            cpu: "200m"
          requests:
            memory: "100Mi"
            cpu: "100m"
---
apiVersion: v1
kind: Service
metadata:
  name: coffee
spec:
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    app: coffee
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tea
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tea
  template:
    metadata:
      labels:
        app: tea
    spec:
      containers:
      - name: tea
        image: nginxdemos/nginx-hello:plain-text
        ports:
        - containerPort: 8080
        resources:
          limits:
            memory: "200Mi"
            cpu: "200m"
          requests:
            memory: "100Mi"
            cpu: "100m"
---
apiVersion: v1
kind: Service
metadata:
  name: tea
spec:
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    app: tea
EOF
# ----------------------------------
kubectl apply -f cafe.yaml
kubectl autoscale deployment coffee --cpu-percent=20 --min=1 --max=5
kubectl get hpa coffee --watch
# Load test the HPA-managed deployment (cafe.example.com must resolve to 10.0.2.4, e.g. via an /etc/hosts entry)
wrk -t10 -c100 -d30s https://cafe.example.com/coffee
# Load test the deployment without an HPA, for comparison
wrk -t10 -c100 -d30s https://cafe.example.com/tea
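If wrk is not installed on the host, an in-cluster alternative modeled on the upstream HPA walkthrough is a throwaway busybox load generator (the pod name load-generator is arbitrary):

# Hammer the coffee Service from inside the cluster; Ctrl-C stops it and --rm removes the pod
kubectl run -i --tty load-generator --rm --image=busybox:1.28 --restart=Never \
  -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://coffee; done"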
kubectl get deploy coffee -w
kubectl delete hpa coffee
tee hpa.yaml <<EOF
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: coffee
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: coffee
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 20
EOF
kubectl apply -f hpa.yaml
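Then verify the HPA picked up its target and is reporting metrics:

kubectl get hpa coffee
kubectl describe hpa coffee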