Commit 97fb1f258e by Николай Зарецкий, 2024-10-29 06:15:14 +03:00 (parent 8b09dcdba7)
36 changed files with 889 additions and 0 deletions

.DS_Store vendored (binary file, not shown)

Terraform/.DS_Store vendored (binary file, not shown)

Terraform/.terraform.lock.hcl Normal file

@@ -0,0 +1,44 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/local" {
version = "2.5.2"
constraints = ">= 2.1.0"
hashes = [
"h1:IyFbOIO6mhikFNL/2h1iZJ6kyN3U00jgkpCLUCThAfE=",
"zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511",
"zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea",
"zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0",
"zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b",
"zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038",
"zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4",
"zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464",
"zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b",
"zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e",
"zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1",
]
}
provider "registry.terraform.io/telmate/proxmox" {
version = "3.0.1-rc4"
constraints = "3.0.1-rc4"
hashes = [
"h1:tcfqcTB5TDQKSGrWksACZdFIX6ig72i++OYaC8EncOU=",
"zh:1070aff02aebeadf130368b0082e76d976f61464b3bb29c1c5a7866bb14c7380",
"zh:3cd96c232a12cf3bbb0e874508e5ff14116ef347d60db20db17ad87bb161ee22",
"zh:4f75954f3e68159ed969e3eac27485166103058eff3f99186d805816c6f8eb66",
"zh:55572fd22f7c62813a691fe0d017b2a57a34f3b4e1c40af6c2197731878ebf84",
"zh:6536402b2eff0a754ff975c39318c3c0b47dfa2dc4461d34a8c55ba493288d9f",
"zh:735f4283286cb78fe28b4ad001771c460f1963ee640e027467eb199d80a6c257",
"zh:90a675455c5812d90acbf44bfee347c2318b13565c68bcf64b452dbe6c2a629a",
"zh:9bbfe89d3f0997a26d7636d5c2d7244beccf92371d17073583299b1b74e1ab9c",
"zh:9ed8ecb50c4ed8555ffe1544325de07db678e2877f7c4637fbfaf02d5f004100",
"zh:b1e362ebd234aa82a38ffcfa4e74295e8a23811edff8af88f79372ba18ef0918",
"zh:c652faede363a91ad3a148cdd1b3d9c3ab8bac1b94d92ce89eb6e1ddadc99cc1",
"zh:d803958e5e465095cc0d5741abf0abd80b5fd3d1c076b40880a136e737bb06d0",
"zh:fa12bd372e39c8ac6295503f88884f328971834e109fcc015322fc9ab5fe858f",
"zh:fb7abe461d36f9868a0a6728320e482ecd54e047c2876dce85d5c2143eba3b8f",
]
}

Terraform/main.tf Normal file

@@ -0,0 +1,128 @@
resource "proxmox_vm_qemu" "proxmox_vm_master" {
for_each = { for i, ip in var.master_ips : i => ip }
name = "k3s-master-${each.key}"
target_node = var.pm_node_name
clone = var.tamplate_vm_name
os_type = "cloud-init"
agent = 1
memory = var.num_k3s_masters_mem
cores = 4
cpu = "host"
vcpus = 1
scsihw = "virtio-scsi-pci"
bootdisk = "scsi0"
disk {
slot = "scsi0"
size = "50G"
type = "disk"
storage = "local-lvm"
iothread = true
}
disk {
slot = "scsi1"
type = "cloudinit"
storage = "local-lvm"
backup = true
}
full_clone = true
ciuser = "root"
cipassword = "8148"
sshkeys = file("/Users/nikolajzareckij/.ssh/id_rsa.pub")
cicustom = "user=local:snippets/cloud_init_master_${each.key}.yml"
ipconfig0 = "ip=${each.value}/${var.networkrange},gw=${var.gateway}"
lifecycle {
ignore_changes = [
ciuser,
sshkeys,
disk,
network
]
}
}
resource "proxmox_vm_qemu" "proxmox_vm_workers" {
for_each = { for i, ip in var.worker_ips : i => ip }
name = "k3s-worker-${each.key}"
target_node = var.pm_node_name
clone = var.tamplate_vm_name
os_type = "cloud-init"
agent = 1
memory = var.num_k3s_nodes_mem
cores = 4
cpu = "host"
vcpus = 1
scsihw = "virtio-scsi-pci"
bootdisk = "scsi0"
disk {
slot = "scsi0"
size = "50G"
type = "disk"
storage = "local-lvm"
iothread = true
}
disk {
slot = "scsi1"
type = "cloudinit"
storage = "local-lvm"
backup = true
}
full_clone = true
ciuser = "root"
cipassword = "8148"
sshkeys = file("/Users/nikolajzareckij/.ssh/id_rsa.pub")
cicustom = "user=local:snippets/cloud_init_worker_${each.key}.yml"
ipconfig0 = "ip=${each.value}/${var.networkrange},gw=${var.gateway}"
lifecycle {
ignore_changes = [
ciuser,
sshkeys,
disk,
network
]
}
}
locals {
k8s_config = templatefile("./templates/k8s.tpl", {
k3s_master_ip = join("\n", [for instance in proxmox_vm_qemu.proxmox_vm_master : join("", [instance.default_ipv4_address, " ansible_ssh_private_key_file=", var.pvt_key])]),
k3s_node_ip = join("\n", [for instance in proxmox_vm_qemu.proxmox_vm_workers : join("", [instance.default_ipv4_address, " ansible_ssh_private_key_file=", var.pvt_key])])
})
}
resource "local_file" "k8s_file" {
content = local.k8s_config
filename = "../inventory/my-cluster/hosts.ini"
}
resource "local_file" "var_file" {
source = "/Users/nikolajzareckij/Documents/homelab-k3s/inventory/group_vars/all.yml"
filename = "/Users/nikolajzareckij/Documents/homelab-k3s/inventory/my-cluster/group_vars/all.yml"
}
resource "local_file" "cloud_init_master" {
for_each = { for i, ip in var.master_ips : i => ip }
content = templatefile("${path.module}./snippets/cloud_init_master.yml", {
hostname = "k3s-master-${each.key}",
})
filename = "/Users/nikolajzareckij/Documents/homelab-k3s/snippets/cloud_init_master_${each.key}.yml"
}
resource "local_file" "cloud_init_worker" {
for_each = { for i, ip in var.worker_ips : i => ip }
content = templatefile("${path.module}./snippets/cloud_init_worker.yml", {
hostname = "k3s-worker-${each.key}",
})
filename = "/Users/nikolajzareckij/Documents/homelab-k3s/snippets/cloud_init_worker_${each.key}.yml"
}

Terraform/output.tf Normal file

@@ -0,0 +1,7 @@
output "Master-IPS" {
value = [for vm in proxmox_vm_qemu.proxmox_vm_master : vm.ipconfig0]
}
output "worker-IPS" {
value = [for vm in proxmox_vm_qemu.proxmox_vm_workers : vm.ipconfig0]
}

Terraform/provider.tf Normal file

@@ -0,0 +1,19 @@
terraform {
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = "3.0.1-rc4"
    }
    local = {
      source  = "hashicorp/local"
      version = ">= 2.5.2"
    }
  }
}

provider "proxmox" {
  pm_api_url      = "https://${var.pm_host}:8006/api2/json"
  pm_user         = var.pm_user
  pm_password     = var.pm_password
  pm_tls_insecure = var.pm_tls_insecure
}
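This commit does not pin down how credentials are supplied; one option is Terraform's standard TF_VAR_ environment mechanism. A shell sketch with placeholder values (none taken from this commit):

# Placeholder values; adjust to your environment.
export TF_VAR_pm_host='192.168.0.5'
export TF_VAR_pm_password='s3cret'
terraform init
terraform plan

This keeps pm_password (declared sensitive = true in variables.tf) out of any file that might get committed.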

Terraform/templates/k8s.tpl Normal file

@@ -0,0 +1,9 @@
[master]
${k3s_master_ip}

[node]
${k3s_node_ip}

[k3s_cluster:children]
master
node

Terraform/variables.tf Normal file

@@ -0,0 +1,65 @@
variable "pm_user" {
description = "The username for the proxmox user"
type = string
sensitive = false
default = "root@pam"
}
variable "pm_password" {
description = "The password for the proxmox user"
type = string
sensitive = true
}
variable "pm_tls_insecure" {
description = "Set to true to ignore certificate errors"
type = bool
default = false
}
variable "pm_host" {
description = "The hostname or IP of the proxmox server"
type = string
}
variable "pm_node_name" {
description = "name of the proxmox node to create the VMs on"
type = string
default = "pve"
}
variable "pvt_key" {}
variable "num_k3s_masters" {
default = 1
}
variable "num_k3s_masters_mem" {
default = "4096"
}
variable "num_k3s_nodes" {
default = 2
}
variable "num_k3s_nodes_mem" {
default = "4096"
}
variable "tamplate_vm_name" {}
variable "master_ips" {
description = "List of ip addresses for master nodes"
}
variable "worker_ips" {
description = "List of ip addresses for worker nodes"
}
variable "networkrange" {
default = 24
}
variable "gateway" {
default = "192.168.0.1"
}
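Several of these variables have no default (pm_password, pm_host, pvt_key, template_vm_name, master_ips, worker_ips), so a var file has to supply them. A minimal terraform.tfvars sketch; the host and template name are illustrative assumptions, while the key path and IPs match the hosts.ini added later in this commit:

# terraform.tfvars (illustrative; adjust to your environment)
pm_host          = "192.168.0.5"            # hypothetical Proxmox host
pm_password      = "changeme"               # or export TF_VAR_pm_password instead
pm_node_name     = "pve"
template_vm_name = "ubuntu-cloudinit-tmpl"  # hypothetical template VM name
pvt_key          = "/Users/nikolajzareckij/.ssh/id_rsa"
master_ips       = ["192.168.0.109"]
worker_ips       = ["192.168.0.110", "192.168.0.111"]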

ansible.cfg Normal file

@@ -0,0 +1,14 @@
[defaults]
nocows = True
roles_path = ./roles
inventory = ./inventory/my-cluster/hosts.ini
remote_tmp = $HOME/.ansible/tmp
local_tmp = $HOME/.ansible/tmp
pipelining = True
host_key_checking = False
deprecation_warnings = False
callbacks_enabled = profile_tasks
display_failed_stderr = yes

[privilege_escalation]
become = True

cloud-init/master.yml Normal file

@@ -0,0 +1,9 @@
#cloud-config
users:
  - name: root
    ssh-authorized-keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDItH73+vTvxxgMlv8vzpRt59KeKykYGhMhOCt+uWxbsuhjPvXRQC6dCwuDLy8heiYFO8bklOiLxLtz3GBOtp4OcjVRkgS7L4+qUn8QkAaJPQeEUuKADrCpxxLz0rYsgLo9WvQ9HS/WS15wmMHbSufGjXjhApZ3VODMyrtdaDOoyKm+YMahxY577TkX3yIdv3+yENPhP+rNdcWxFKYvEzOz2XACvq81fxfcYLN5opPbz+UILnQSyxI+TxZtzq3icPQAsVXPmZGBbryiSk3e5tFhE7ORkw1I2QG4CBEPZx+gAhbO0p3sCcdpLF7z4HxaGzJKpy6V8JxZHmLJCgQeSsgaeP3OvTU/lgsWw6xphEpQqJmb9dMjtJMyV8I/PxrLPP9ikh5tcqlXENLXSc6V4BkI1NUJZhYm0sYPcWW2ZeYy6gGzYiSgu3wqzqf0yG9j8NnMtdyvBLMhNKasqfd0CRK+CQ3apMghC68X7JK7CDA/edjfl2MA/QJ2ZoYBBzyXd9vUJgMlyZaxXG9NIA7rU88OZTmS+43y1BRNlkXh231EjtH7h25n+nYxYInFtFWbbi1liORxVO622Y4YnCdTJFoyiFGsuzSaDYfjRMzSHOjnUlUVsqYHPIfH6h/ZH5vVrAMihnIhqJDbi1rLtZVx0GsmpXMAWAee2oi4rEcEynydMQ== gba404@gmail.com
    lock_passwd: false
    passwd: $1$/bB7Q1vR$Nz4PtA52uDdF6.pc.haec/
package_update: true
package_upgrade: true

inventory/.DS_Store vendored (binary file, not shown)

inventory/group_vars/all.yml Normal file

@@ -0,0 +1,14 @@
k3s_version: v1.22.2+k3s1
ansible_user: root
systemd_dir: /etc/systemd/system
master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}"
extra_server_args: "--write-kubeconfig-mode=644"
extra_agent_args: ""
copy_kubeconfig: true
metallb: false
metallb_version: "v0.12.1"
metallb_range: "192.168.3.93-192.168.3.94"
argocd: false
argocd_service_type: LoadBalancer
dns_servers: []
ansible_python_interpreter: /usr/bin/python3

inventory/my-cluster/.DS_Store vendored (binary file, not shown)

inventory/my-cluster/hosts.ini Normal file

@@ -0,0 +1,12 @@
[master]
192.168.0.109 ansible_ssh_private_key_file=/Users/nikolajzareckij/.ssh/id_rsa ansible_python_interpreter=/usr/bin/python3.12

[node]
192.168.0.110 ansible_ssh_private_key_file=/Users/nikolajzareckij/.ssh/id_rsa ansible_python_interpreter=/usr/bin/python3.12
192.168.0.111 ansible_ssh_private_key_file=/Users/nikolajzareckij/.ssh/id_rsa ansible_python_interpreter=/usr/bin/python3.12

[k3s_cluster:children]
master
node

[all:vars]
ansible_python_interpreter=/usr/bin/python3.12

reset.yml Normal file

@@ -0,0 +1,7 @@
---
- hosts: k3s_cluster
  gather_facts: true
  become: true
  roles:
    - role: reset

roles/download/tasks/main.yml Normal file

@@ -0,0 +1,36 @@
---
- name: Download k3s binary x64
  get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: "0755"
  when: ansible_facts.architecture == "x86_64"

- name: Download k3s binary arm64
  get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: "0755"
  when:
    - ( ansible_facts.architecture is search("arm") and
        ansible_facts.userspace_bits == "64" ) or
      ansible_facts.architecture is search("aarch64")

- name: Download k3s binary armhf
  get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: "0755"
  when:
    - ansible_facts.architecture is search("arm")
    - ansible_facts.userspace_bits == "32"

roles/k3s/master/tasks/main.yml Normal file

@@ -0,0 +1,83 @@
---
- name: Copy K3s service file
  register: k3s_service
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s.service"
    owner: root
    group: root
    mode: "0644"

- name: Enable and check K3s service
  systemd:
    name: k3s
    daemon_reload: true
    state: restarted
    enabled: true

- name: Wait for node-token
  wait_for:
    path: /var/lib/rancher/k3s/server/node-token

- name: Register node-token file access mode
  stat:
    path: /var/lib/rancher/k3s/server
  register: p

- name: Change file access node-token
  file:
    path: /var/lib/rancher/k3s/server
    mode: "g+rx,o+rx"

- name: Read node-token from master
  slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: node_token

- name: Store Master node-token
  set_fact:
    token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

- name: Restore node-token file access
  file:
    path: /var/lib/rancher/k3s/server
    mode: "{{ p.stat.mode }}"

- name: Create directory .kube
  file:
    path: ~{{ ansible_user }}/.kube
    state: directory
    owner: "{{ ansible_user }}"
    mode: "u=rwx,g=rx,o="

- name: Change k3s.yaml permissions to 644
  file:
    path: /etc/rancher/k3s/k3s.yaml
    owner: "{{ ansible_user }}"
    mode: "644"

- name: Replace https://localhost:6443 by https://master-ip:6443
  command: >-
    k3s kubectl config set-cluster default
      --server=https://{{ master_ip }}:6443
      --kubeconfig ~{{ ansible_user }}/.kube/config
  changed_when: true

- name: Create kubectl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubectl
    state: link

- name: Create crictl symlink
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/crictl
    state: link

- name: Copy config to local host
  fetch:
    src: /etc/rancher/k3s/k3s.yaml
    dest: ~/.kube/config
    flat: true
  when: copy_kubeconfig

roles/k3s/master/templates/k3s.service.j2 Normal file

@@ -0,0 +1,34 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
Wants=network-online.target
After=network-online.target

[Install]
WantedBy=multi-user.target

[Service]
Type=notify
EnvironmentFile=-/etc/default/%N
EnvironmentFile=-/etc/sysconfig/%N
EnvironmentFile=-/etc/systemd/system/k3s.service.env
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s \
    server \
    --write-kubeconfig-mode 644 \
{% if metallb is sameas true %}
    --disable servicelb \
{% endif %}

roles/k3s/master/templates/ (second k3s server unit template; filename not shown)

@@ -0,0 +1,32 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
Wants=network-online.target
After=network-online.target

[Install]
WantedBy=multi-user.target

[Service]
Type=notify
EnvironmentFile=-/etc/default/%N
EnvironmentFile=-/etc/sysconfig/%N
EnvironmentFile=-/etc/systemd/system/k3s.service.env
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s \
    server \
    --write-kubeconfig-mode 644 \
    --disable traefik \

roles/k3s/node/tasks/main.yml Normal file

@@ -0,0 +1,16 @@
---
- name: Copy K3s service file
  template:
    src: "k3s.service.j2"
    dest: "{{ systemd_dir }}/k3s-node.service"
    owner: root
    group: root
    mode: "0755"

- name: Enable and check K3s service
  systemd:
    name: k3s-node
    daemon_reload: true
    state: restarted
    enabled: true

roles/k3s/node/templates/k3s.service.j2 Normal file

@@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target

roles/postconfig/localhost/tasks/main.yml Normal file

@@ -0,0 +1,65 @@
---
- name: Test kubeconfig path
  stat:
    path: ~/.kube/config
  register: kubeconfig_path

- name: Replace host ip address in the kubeconfig
  replace:
    path: ~/.kube/config
    regexp: "https://127.0.0.1:6443"
    replace: "https://{{ master_ip }}:6443"
  when: kubeconfig_path.stat.exists and copy_kubeconfig

- name: Restrict kubeconfig permissions to 600
  file:
    path: ~/.kube/config
    mode: "600"

- name: Check whether Helm is already set up
  stat:
    path: $HOME/.config/helm/repositories.yaml
  register: helm_check

- name: Download get-helm-3
  get_url:
    url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
    dest: ~/get-helm-3.sh
    mode: "700"
  when: not helm_check.stat.exists

- name: Install Helm if not present
  command: >-
    ~/get-helm-3.sh
  when: not helm_check.stat.exists
  changed_when: true

- name: Install metallb
  shell: |
    kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/{{ metallb_version }}/manifests/namespace.yaml
    kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/{{ metallb_version }}/manifests/metallb.yaml
  when: metallb

- name: Configure metallb range
  shell: |
    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: ConfigMap
    metadata:
      namespace: metallb-system
      name: config
    data:
      config: |
        address-pools:
          - name: default
            protocol: layer2
            addresses:
              - {{ metallb_range }}
    EOF
  when: metallb

- name: Install argocd
  shell: |
    kubectl create namespace argocd
    kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
    kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "{{ argocd_service_type }}"}}'
  when: argocd

roles/prereq/defaults/main.yml Normal file

@@ -0,0 +1 @@
dns_servers: []

roles/prereq/tasks/main.yml Normal file

@@ -0,0 +1,65 @@
---
- name: Set SELinux to disabled state
  selinux:
    state: disabled
  when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux']

- name: Enable IPv4 forwarding
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: true

- name: Enable IPv6 forwarding
  sysctl:
    name: net.ipv6.conf.all.forwarding
    value: "1"
    state: present
    reload: true

- name: Fix dns servers in resolv.conf
  template:
    src: resolv.conf.j2
    dest: /etc/resolv.conf
  when:
    - dns_servers | length > 0

- name: Add br_netfilter to /etc/modules-load.d/
  copy:
    content: "br_netfilter"
    dest: /etc/modules-load.d/br_netfilter.conf
    mode: "u=rw,g=,o="
  when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux']

- name: Load br_netfilter
  modprobe:
    name: br_netfilter
    state: present
  when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux']

- name: Set bridge-nf-call-iptables (just to be sure)
  sysctl:
    name: "{{ item }}"
    value: "1"
    state: present
    reload: true
  when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux']
  loop:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-ip6tables

- name: Add /usr/local/bin to sudo secure_path
  lineinfile:
    line: "Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin"
    regexp: "Defaults(\\s)*secure_path(\\s)*="
    state: present
    insertafter: EOF
    path: /etc/sudoers
    validate: "visudo -cf %s"
  when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux']

- name: Install nfs-common on the servers
  package:
    name: nfs-common
    state: present

roles/prereq/templates/resolv.conf.j2 Normal file

@@ -0,0 +1,3 @@
{% for dns_server in dns_servers %}
nameserver {{ dns_server }}
{% endfor %}

roles/raspberrypi/handlers/main.yml Normal file

@@ -0,0 +1,3 @@
---
- name: reboot
  reboot:

roles/raspberrypi/tasks/main.yml Normal file

@@ -0,0 +1,51 @@
---
- name: Test for raspberry pi /proc/cpuinfo
  command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
  register: grep_cpuinfo_raspberrypi
  failed_when: false
  changed_when: false

- name: Test for raspberry pi /proc/device-tree/model
  command: grep -E "Raspberry Pi" /proc/device-tree/model
  register: grep_device_tree_model_raspberrypi
  failed_when: false
  changed_when: false

- name: Set raspberry_pi fact to true
  set_fact:
    raspberry_pi: true
  when:
    grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0

- name: Set detected_distribution to Raspbian
  set_fact:
    detected_distribution: Raspbian
  when: >
    raspberry_pi|default(false) and
    ( ansible_facts.lsb.id|default("") == "Raspbian" or
      ansible_facts.lsb.description|default("") is match("[Rr]aspbian.*") )

- name: Set detected_distribution to Raspbian (ARM64 on Debian Buster)
  set_fact:
    detected_distribution: Raspbian
  when:
    - ansible_facts.architecture is search("aarch64")
    - raspberry_pi|default(false)
    - ansible_facts.lsb.description|default("") is match("Debian.*buster")

- name: Set detected_distribution_major_version
  set_fact:
    detected_distribution_major_version: "{{ ansible_facts.lsb.major_release }}"
  when:
    - detected_distribution | default("") == "Raspbian"

- name: Execute OS related tasks on the Raspberry Pi
  include_tasks: "{{ item }}"
  with_first_found:
    - "prereq/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
    - "prereq/{{ detected_distribution }}.yml"
    - "prereq/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
    - "prereq/{{ ansible_distribution }}.yml"
    - "prereq/default.yml"
  when:
    - raspberry_pi|default(false)

roles/raspberrypi/tasks/prereq/CentOS.yml Normal file

@@ -0,0 +1,8 @@
---
- name: Enable cgroup via boot commandline if not already enabled for CentOS
  lineinfile:
    path: /boot/cmdline.txt
    backrefs: true
    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
  notify: reboot

roles/raspberrypi/tasks/prereq/Raspbian.yml Normal file

@@ -0,0 +1,25 @@
---
- name: Activating cgroup support
  lineinfile:
    path: /boot/cmdline.txt
    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
    backrefs: true
  notify: reboot

- name: Flush iptables before changing to iptables-legacy
  iptables:
    flush: true
  changed_when: false  # iptables flush always returns changed

- name: Changing to iptables-legacy
  alternatives:
    path: /usr/sbin/iptables-legacy
    name: iptables
  register: ip4_legacy

- name: Changing to ip6tables-legacy
  alternatives:
    path: /usr/sbin/ip6tables-legacy
    name: ip6tables
  register: ip6_legacy

roles/raspberrypi/tasks/prereq/Ubuntu.yml Normal file

@@ -0,0 +1,8 @@
---
- name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
  lineinfile:
    path: /boot/firmware/cmdline.txt
    backrefs: true
    regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
    line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
  notify: reboot

roles/raspberrypi/tasks/prereq/default.yml Normal file

@@ -0,0 +1 @@
---

roles/reset/tasks/main.yml Normal file

@@ -0,0 +1,42 @@
---
- name: Disable services
  systemd:
    name: "{{ item }}"
    state: stopped
    enabled: false
  failed_when: false
  with_items:
    - k3s
    - k3s-node

- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
  register: pkill_containerd_shim_runc
  command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
  changed_when: "pkill_containerd_shim_runc.rc == 0"
  failed_when: false

- name: Umount k3s filesystems
  include_tasks: umount_with_children.yml
  with_items:
    - /run/k3s
    - /var/lib/kubelet
    - /run/netns
    - /var/lib/rancher/k3s
  loop_control:
    loop_var: mounted_fs

- name: Remove service files, binaries and data
  file:
    name: "{{ item }}"
    state: absent
  with_items:
    - /usr/local/bin/k3s
    - "{{ systemd_dir }}/k3s.service"
    - "{{ systemd_dir }}/k3s-node.service"
    - /etc/rancher/k3s
    - /var/lib/kubelet
    - /var/lib/rancher/k3s

- name: daemon_reload
  systemd:
    daemon_reload: true

roles/reset/tasks/umount_with_children.yml Normal file

@@ -0,0 +1,16 @@
---
- name: Get the list of mounted filesystems
  shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
  register: get_mounted_filesystems
  args:
    executable: /bin/bash
  failed_when: false
  changed_when: get_mounted_filesystems.stdout | length > 0
  check_mode: false

- name: Umount filesystem
  mount:
    path: "{{ item }}"
    state: unmounted
  with_items:
    "{{ get_mounted_filesystems.stdout_lines | reverse | list }}"

site.yml Normal file

@@ -0,0 +1,28 @@
---
- hosts: all
  gather_facts: true
  vars:
    ansible_python_interpreter: /usr/bin/python3.12

- hosts: k3s_cluster
  gather_facts: true
  become: true
  roles:
    - role: prereq
    - role: download
    - role: raspberrypi

- hosts: master
  become: true
  roles:
    - role: k3s/master

- hosts: node
  become: true
  roles:
    - role: k3s/node

- hosts: localhost
  connection: local
  roles:
    - role: postconfig/localhost
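Taken together, the intended order of operations appears to be: Terraform provisions the VMs and renders inventory/my-cluster/hosts.ini, then this playbook configures the cluster. A shell sketch from the repo root, assuming the var file shown earlier:

# Provision VMs; main.tf writes ../inventory/my-cluster/hosts.ini on apply
cd Terraform
terraform init
terraform apply

# Configure k3s; ansible.cfg already points inventory at inventory/my-cluster/hosts.ini
cd ..
ansible-playbook site.yml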

snippets/cloud_init_master.yml Normal file

@@ -0,0 +1,10 @@
#cloud-config
hostname: ${hostname}
users:
  - name: root
    ssh-authorized-keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDItH73+vTvxxgMlv8vzpRt59KeKykYGhMhOCt+uWxbsuhjPvXRQC6dCwuDLy8heiYFO8bklOiLxLtz3GBOtp4OcjVRkgS7L4+qUn8QkAaJPQeEUuKADrCpxxLz0rYsgLo9WvQ9HS/WS15wmMHbSufGjXjhApZ3VODMyrtdaDOoyKm+YMahxY577TkX3yIdv3+yENPhP+rNdcWxFKYvEzOz2XACvq81fxfcYLN5opPbz+UILnQSyxI+TxZtzq3icPQAsVXPmZGBbryiSk3e5tFhE7ORkw1I2QG4CBEPZx+gAhbO0p3sCcdpLF7z4HxaGzJKpy6V8JxZHmLJCgQeSsgaeP3OvTU/lgsWw6xphEpQqJmb9dMjtJMyV8I/PxrLPP9ikh5tcqlXENLXSc6V4BkI1NUJZhYm0sYPcWW2ZeYy6gGzYiSgu3wqzqf0yG9j8NnMtdyvBLMhNKasqfd0CRK+CQ3apMghC68X7JK7CDA/edjfl2MA/QJ2ZoYBBzyXd9vUJgMlyZaxXG9NIA7rU88OZTmS+43y1BRNlkXh231EjtH7h25n+nYxYInFtFWbbi1liORxVO622Y4YnCdTJFoyiFGsuzSaDYfjRMzSHOjnUlUVsqYHPIfH6h/ZH5vVrAMihnIhqJDbi1rLtZVx0GsmpXMAWAee2oi4rEcEynydMQ== gba404@gmail.com
    lock_passwd: false
    passwd: $1$/bB7Q1vR$Nz4PtA52uDdF6.pc.haec/
package_update: true
package_upgrade: true

snippets/cloud_init_worker.yml Normal file

@@ -0,0 +1,10 @@
#cloud-config
hostname: ${hostname}
users:
  - name: root
    ssh-authorized-keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDItH73+vTvxxgMlv8vzpRt59KeKykYGhMhOCt+uWxbsuhjPvXRQC6dCwuDLy8heiYFO8bklOiLxLtz3GBOtp4OcjVRkgS7L4+qUn8QkAaJPQeEUuKADrCpxxLz0rYsgLo9WvQ9HS/WS15wmMHbSufGjXjhApZ3VODMyrtdaDOoyKm+YMahxY577TkX3yIdv3+yENPhP+rNdcWxFKYvEzOz2XACvq81fxfcYLN5opPbz+UILnQSyxI+TxZtzq3icPQAsVXPmZGBbryiSk3e5tFhE7ORkw1I2QG4CBEPZx+gAhbO0p3sCcdpLF7z4HxaGzJKpy6V8JxZHmLJCgQeSsgaeP3OvTU/lgsWw6xphEpQqJmb9dMjtJMyV8I/PxrLPP9ikh5tcqlXENLXSc6V4BkI1NUJZhYm0sYPcWW2ZeYy6gGzYiSgu3wqzqf0yG9j8NnMtdyvBLMhNKasqfd0CRK+CQ3apMghC68X7JK7CDA/edjfl2MA/QJ2ZoYBBzyXd9vUJgMlyZaxXG9NIA7rU88OZTmS+43y1BRNlkXh231EjtH7h25n+nYxYInFtFWbbi1liORxVO622Y4YnCdTJFoyiFGsuzSaDYfjRMzSHOjnUlUVsqYHPIfH6h/ZH5vVrAMihnIhqJDbi1rLtZVx0GsmpXMAWAee2oi4rEcEynydMQ== gba404@gmail.com
    lock_passwd: false
    passwd: $1$/bB7Q1vR$Nz4PtA52uDdF6.pc.haec/
package_update: true
package_upgrade: true