chore: change debian default from bullseye to bookworm #783

Merged (1 commit) on Aug 3, 2024
8 changes: 4 additions & 4 deletions kubeinit/roles/kubeinit_libvirt/defaults/main.yml
@@ -38,10 +38,10 @@ kubeinit_libvirt_source_keystore_dir: "/home/{{ kubeinit_libvirt_cloud_user }}/.
kubeinit_libvirt_source_pubkey_file: "id_{{ kubeinit_ssh_keytype }}.pub"

kubeinit_libvirt_centos_release: "20240703.1"
#kubeinit_libvirt_debian_release: "12"
#kubeinit_libvirt_debian_codename: "bookworm"
kubeinit_libvirt_debian_release: "11"
kubeinit_libvirt_debian_codename: "bullseye"
kubeinit_libvirt_debian_release: "12"
kubeinit_libvirt_debian_codename: "bookworm"
#kubeinit_libvirt_debian_release: "11"
#kubeinit_libvirt_debian_codename: "bullseye"
kubeinit_libvirt_ubuntu_release: "jammy"

kubeinit_libvirt_cloud_images:
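Note: deployments that still need the previous default can restore it from the inventory or with extra vars, since these are role defaults; a minimal sketch (hypothetical override, e.g. placed in a group_vars file or passed with -e):

kubeinit_libvirt_debian_release: "11"
kubeinit_libvirt_debian_codename: "bullseye"
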
107 changes: 49 additions & 58 deletions kubeinit/roles/kubeinit_libvirt/tasks/cleanup_hypervisors.yml
@@ -39,55 +39,6 @@
# Cleanup all resources left over from previous cluster deployment
#

- name: Get list of existing remote system connection definitions
ansible.builtin.shell: |
set -eo pipefail
podman --remote system connection list | sed -e 1d -e 's/[* ].*//'
args:
executable: /bin/bash
register: _result_connections
changed_when: "_result_connections.rc == 0"

# - name: Remove any existing remote system connection definition for bastion hypervisor
# ansible.builtin.command: |
# podman --remote system connection remove {{ item }}
# loop: "{{ _result_connections.stdout_lines | list }}"
# register: _result
# changed_when: "_result.rc == 0"

- name: Remove all host ssh_connection_address entries from known_hosts
ansible.builtin.known_hosts:
name: "{{ hostvars[item].ssh_connection_address }}"
state: absent
loop: "{{ groups['all_hosts'] }}"

# - name: Reset ssh keys on localhost
# ansible.builtin.known_hosts:
# name: "{{ item[1] }}"
# state: absent
# loop: "{{ kubeinit_cluster_hostvars.node_aliases }}"

# - name: Reset ssh keys in hypervisors
# ansible.builtin.known_hosts:
# name: "{{ node_alias }}"
# state: absent
# loop: "{{ groups['all_hosts'] | product(kubeinit_cluster_hostvars.node_aliases | flatten | unique) }}"
# vars:
# kubeinit_deployment_node_name: "{{ item[0] }}"
# node_alias: "{{ item[1] }}"
# delegate_to: "{{ kubeinit_deployment_node_name }}"

- name: Remove any existing ssh tunnels on bastion host
ansible.builtin.shell: |
set -eo pipefail
hosts=$(for file in ~/.ssh/cm-root*; do echo $file; done | sed -n -e 's;.*@\(.*\):22;\1;p')
for host in $hosts; do ssh -O exit -S "~/.ssh/cm-%r@%h:%p" $host || true; done
args:
executable: /bin/bash
register: _result
changed_when: "_result.rc == 0"
delegate_to: "{{ kubeinit_bastion_host }}"

- name: Find any service pods from previous deployments
containers.podman.podman_pod_info:
loop: "{{ groups['all_service_nodes'] }}"
@@ -101,15 +52,7 @@

- name: Set facts about those pods
ansible.builtin.set_fact:
orphaned_pods: "{{ (orphaned_pods | default([])) + ([service_node] | product(pods)) }}"
loop: "{{ _result_podinfo.results }}"
loop_control:
loop_var: pod_info_result
vars:
kubeinit_deployment_node_name: "{{ hostvars[pod_info_result.service_node].container_host }}"
service_node: "{{ pod_info_result.service_node }}"
pods: "{{ pod_info_result.pods | default([]) }}"
when: hostvars[kubeinit_deployment_node_name].podman_is_installed is defined and hostvars[kubeinit_deployment_node_name].podman_is_installed
orphaned_pods: "{{ _result_podinfo.results | map(attribute='service_node', default=[]) | product(_result_podinfo.results | map(attribute='pods', default=[]) | flatten) }}"

- name: Set facts about pods for this cluster
ansible.builtin.set_fact:
@@ -347,6 +290,54 @@
tasks_from: cleanup_libvirt.yml
public: true

- name: Remove any existing ssh tunnels on bastion host
ansible.builtin.shell: |
set -eo pipefail
hosts=$(for file in ~/.ssh/cm-root*; do echo $file; done | sed -n -e 's;.*@\(.*\):22;\1;p')
for host in $hosts; do ssh -O exit -S "~/.ssh/cm-%r@%h:%p" $host || true; done
args:
executable: /bin/bash
register: _result
changed_when: "_result.rc == 0"
delegate_to: "{{ kubeinit_bastion_host }}"
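
Note: the shell pipeline in the task above derives hostnames from ssh ControlMaster socket file names; a sketch with a hypothetical socket, shown as comments:

# Hypothetical example: a socket file named ~/.ssh/cm-root@hypervisor-01:22
# matches the sed pattern '.*@\(.*\):22', so the loop runs
#   ssh -O exit -S "~/.ssh/cm-%r@%h:%p" hypervisor-01
# which closes any multiplexed ssh connection still open for that host.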

- name: Get list of existing remote system connection definitions
ansible.builtin.shell: |
set -eo pipefail
podman --remote system connection list | sed -e 1d -e 's/[* ].*//'
args:
executable: /bin/bash
register: _result_connections
changed_when: "_result_connections.rc == 0"

- name: Remove any existing remote system connection definition for bastion hypervisor
ansible.builtin.command: |
podman --remote system connection remove {{ item }}
loop: "{{ _result_connections.stdout_lines | list }}"
register: _result
changed_when: "_result.rc == 0"
when: item in groups['hypervisor_hosts']

- name: Get list of existing remote system connection definitions
ansible.builtin.shell: |
set -eo pipefail
podman --remote system connection list | sed -e 1d -e 's/[* ].*//'
args:
executable: /bin/bash
register: _result
changed_when: "_result.rc == 0"

- name: Collect all of the host aliases
ansible.builtin.set_fact:
all_host_aliases: "{{ (all_host_aliases | default([])) + item }}"
loop: "{{ groups['all_hosts'] | zip(groups['all_hosts'] | map('extract', hostvars, 'ansible_host'), groups['all_hosts'] | map('extract', hostvars, 'ssh_connection_address')) }}"

- name: Remove hypervisors from known hosts list
ansible.builtin.known_hosts:
name: "{{ item }}"
state: absent
loop: "{{ all_host_aliases }}"

- name: Include hosts and stop the deployment if required
block:
- name: Add task-cleanup-hypervisors to tasks_completed
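Note: the "Collect all of the host aliases" task added above flattens each (inventory name, ansible_host, ssh_connection_address) triple into one list, so every alias a hypervisor may appear under is removed from known_hosts; a sketch with hypothetical values:

# groups['all_hosts']             -> ['hypervisor-01', 'hypervisor-02']
# ansible_host per host           -> ['10.0.0.1', '10.0.0.2']
# ssh_connection_address per host -> ['hyp-01.example.com', 'hyp-02.example.com']
# After the loop, all_host_aliases would be:
#   ['hypervisor-01', '10.0.0.1', 'hyp-01.example.com',
#    'hypervisor-02', '10.0.0.2', 'hyp-02.example.com']
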
18 changes: 14 additions & 4 deletions kubeinit/roles/kubeinit_libvirt/tasks/deploy_debian_guest.yml
@@ -20,7 +20,7 @@
- name: Create the network interface template for networking details
ansible.builtin.template:
src: "debian-network-config-enp1s0.j2"
dest: "{{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/enp1s0"
dest: "{{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/10_network.cfg"
mode: "0644"

- name: Create the hosts template
@@ -58,6 +58,7 @@
- name: "Inject virt-customize assets"
ansible.builtin.shell: |
virt-customize -a {{ kubeinit_libvirt_target_image_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}.qcow2 \
--run-command "ssh-keygen -A" \
--run-command "sed -i 's/PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config" \
--run-command "sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config" \
--run-command "dpkg-reconfigure openssh-server" \
@@ -71,8 +72,7 @@
--run-command 'mkdir -p {{ kubeinit_libvirt_source_keystore_dir }}' \
--ssh-inject {{ kubeinit_libvirt_cloud_user }}:file:{{ kubeinit_libvirt_source_keystore_dir }}/{{ kubeinit_libvirt_source_pubkey_file }} \
{% endif %}
--run-command 'mkdir -p /etc/network/interfaces.d' \
--copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/enp1s0:/etc/network/interfaces.d \
--copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/10_network.cfg:/etc/cloud/cloud.cfg.d \
--copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/hosts.debian.tmpl:/etc/cloud/templates \
--copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/90_dpkg.cfg:/etc/cloud/cloud.cfg.d \
--copy-in {{ kubeinit_libvirt_hypervisor_tmp_dir }}/{{ hostvars[kubeinit_deployment_node_name].guest_name }}/cloud.cfg:/etc/cloud
@@ -88,7 +88,7 @@
--name {{ hostvars[kubeinit_deployment_node_name].guest_name }} \
--memory memory={{ hostvars[kubeinit_deployment_node_name].ram|int // 1024 }} \
--vcpus {{ hostvars[kubeinit_deployment_node_name].vcpus }},maxvcpus={{ hostvars[kubeinit_deployment_node_name].maxvcpus }} \
--os-variant ubuntu18.04 \
--os-variant debian11 \
--autostart \
--network network={{ kubeinit_cluster_hostvars.network_name }},mac={{ hostvars[kubeinit_deployment_node_name].mac }},virtualport.parameters.interfaceid={{ hostvars[kubeinit_deployment_node_name].interfaceid }},target.dev=veth0-{{ hostvars[kubeinit_deployment_node_name].ansible_host | ansible.utils.ip4_hex }},model=virtio \
--graphics none \
@@ -120,6 +120,15 @@
- name: Update packages
ansible.builtin.command: apt update

- name: Set locale
ansible.builtin.command: localectl set-locale en_US.UTF-8

- name: Restart the cloud-config service
ansible.builtin.service:
name: "cloud-config"
state: restarted
enabled: yes

- name: Install resolvconf
ansible.builtin.package:
name: resolvconf
@@ -168,6 +177,7 @@
name: "systemd-resolved"
state: restarted
enabled: yes
when: kubeinit_libvirt_debian_codename == 'bullseye'

- name: Regenerate the resolv.conf
ansible.builtin.shell: |
@@ -87,7 +87,7 @@
--name {{ hostvars[kubeinit_deployment_node_name].guest_name }} \
--memory memory={{ hostvars[kubeinit_deployment_node_name].ram|int // 1024 }} \
--vcpus {{ hostvars[kubeinit_deployment_node_name].vcpus }},maxvcpus={{ hostvars[kubeinit_deployment_node_name].maxvcpus }} \
--os-variant ubuntu18.04 \
--os-variant ubuntujammy \
--autostart \
--network network={{ kubeinit_cluster_hostvars.network_name }},mac={{ hostvars[kubeinit_deployment_node_name].mac }},virtualport.parameters.interfaceid={{ hostvars[kubeinit_deployment_node_name].interfaceid }},target.dev=veth0-{{ hostvars[kubeinit_deployment_node_name].ansible_host | ansible.utils.ip4_hex }},model=virtio \
--graphics none \
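Note on the --os-variant updates above (ubuntu18.04 replaced by debian11 and ubuntujammy): the identifiers accepted by virt-install come from the libosinfo database, and the valid names can be listed on the hypervisor. A sketch, assuming the osinfo-query tool from libosinfo is installed there:

- name: List os-variant identifiers known to libosinfo (illustrative only)
  ansible.builtin.command: osinfo-query os
  register: _result_osinfo
  changed_when: false
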
@@ -1,29 +1,30 @@
## template:jinja
# This file is rendered using a template from a KubeInit role
# This file will be overridden each time the playbook runs
# Do not edit directly
# More information at: https://github.com/kubeinit/kubeinit

# This file is only utilized if enabled in cloud-config.
# Specifically, in order to enable it you need to add the
# following to config:
# manage_etc_hosts: True
#
{#
This file (/etc/cloud/templates/hosts.debian.tmpl) is only utilized
if enabled in cloud-config. Specifically, in order to enable it
you need to add the following to config:
manage_etc_hosts: True
-#}
# Your system has configured 'manage_etc_hosts' as True.
# As a result, if you wish for changes to this file to persist
# then you will need to either
# a.) make changes to the master file in /etc/cloud/templates/hosts.redhat.tmpl
# a.) make changes to the master file in /etc/cloud/templates/hosts.debian.tmpl
# b.) change or remove the value of 'manage_etc_hosts' in
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
127.0.0.1 $fqdn $hostname
127.0.0.1 localhost.localdomain localhost
127.0.0.1 localhost4.localdomain4 localhost4
{# The value '{{hostname}}' will be replaced with the local-hostname -#}
127.0.1.1 {% raw %}{{fqdn}} {{hostname}}{% endraw %}

127.0.0.1 localhost

# The following lines are desirable for IPv6 capable hosts
::1 $fqdn $hostname
::1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

{% for node in groups['all_nodes'] %}
{{ hostvars[node].ansible_host }} {{ node }}.{{ kubeinit_cluster_fqdn }} {{ node }}
@@ -74,8 +74,6 @@ hostname: {{ kubeinit_deployment_node_name }}
fqdn: {{ kubeinit_deployment_node_name }}.{{ kubeinit_cluster_fqdn }}
manage_etc_hosts: true

network: {config: disabled}

# run commands
# default: none
# runcmd contains a list of either lists or a string
@@ -1,6 +1,13 @@

auto enp1s0
iface enp1s0 inet static
address {{ hostvars[kubeinit_deployment_node_name].ansible_host }}/{{ kubeinit_cluster_prefix }}
gateway {{ kubeinit_cluster_gateway }}
mtu 1442
network:
version: 2
ethernets:
enp1s0:
addresses:
- "{{ hostvars[kubeinit_deployment_node_name].ansible_host }}/{{ kubeinit_cluster_prefix }}"
mtu: 1442
nameservers:
addresses:
- "{{ kubeinit_cluster_nameserver }}"
routes:
- to: "default"
via: "{{ kubeinit_cluster_gateway }}"
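
For reference, with hypothetical values for the node address, cluster prefix, nameserver, and gateway, the new template above renders to a cloud-init network config v2 document roughly like this:

network:
  version: 2
  ethernets:
    enp1s0:
      addresses:
        - "10.0.0.10/24"
      mtu: 1442
      nameservers:
        addresses:
          - "10.0.0.100"
      routes:
        - to: "default"
          via: "10.0.0.254"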