diff --git a/README.md b/README.md
index 2b6dc5f..abaa25b 100644
--- a/README.md
+++ b/README.md
@@ -9,3 +9,5 @@ Deploy kubernetes
https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2?tab=doc
https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
+
+https://github.com/XenitAB/spegel
diff --git a/defaults/main.yml b/defaults/main.yml
index 3bbf3a5..1a656c5 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -2,7 +2,16 @@
# value for kubernetes_cri: containerd, cri-o
#kubernetes_cri: "containerd"
kubernetes_server: false
+kubernetes_interface: '{{ ansible_default_ipv4.interface }}'
# value for kuberntes_network: flannel, calico, weave-net
#kubernetes_network: weave-net
kubernetes_kubeproxy_mode: ipvs
-kubernetes_version: 1.20.1
+kubernetes_version: 1.31.5
+kubernetes_k3s_version: 1.31.5+k3s1
+#kubernetes_pods_network: "10.244.0.0/16"
+#kubernetes_svc_network: "10.96.0.0/12"
+kubernetes_pods_network: "10.42.0.0/16"
+kubernetes_svc_network: "10.43.0.0/16"
+lb_auth_pass: 1be344d62acc46c6858ae8475668a245
+kubernetes_swap_enabled: false
+kubernetes_lvm: true
diff --git a/files/etc/NetworkManager/conf.d/calico.conf b/files/etc/NetworkManager/conf.d/calico.conf
new file mode 100644
index 0000000..b4ac62a
--- /dev/null
+++ b/files/etc/NetworkManager/conf.d/calico.conf
@@ -0,0 +1,3 @@
+# https://docs.tigera.io/calico/latest/operations/troubleshoot/troubleshooting#configure-networkmanager
+[keyfile]
+unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:wireguard.cali
diff --git a/files/etc/kubernetes/policies/audit-policy.yaml b/files/etc/kubernetes/policies/audit-policy.yaml
index 25b8fd0..9067920 100644
--- a/files/etc/kubernetes/policies/audit-policy.yaml
+++ b/files/etc/kubernetes/policies/audit-policy.yaml
@@ -10,7 +10,15 @@ rules:
- group: ""
# Resource "pods" doesn't match requests to any subresource of pods,
# which is consistent with the RBAC policy.
- resources: ["pods"]
+    resources: ["pods", "deployments"]  # NOTE(review): "deployments" are in the "apps" API group, not core "" — this entry never matches; add a separate apps-group rule
+
+ - level: RequestResponse
+ resources:
+ - group: "rbac.authorization.k8s.io"
+      # Log RBAC cluster roles and bindings in full, since changes
+      # here affect cluster-wide permissions.
+ resources: ["clusterroles", "clusterrolebindings"]
+
# Log "pods/log", "pods/status" at Metadata level
- level: Metadata
resources:
@@ -48,11 +56,17 @@ rules:
# The empty string "" can be used to select non-namespaced resources.
namespaces: ["kube-system"]
- # Log configmap and secret changes in all other namespaces at the Metadata level.
+ # Log configmap changes in all other namespaces at the RequestResponse level.
+ - level: RequestResponse
+ resources:
+ - group: "" # core API group
+ resources: ["configmaps"]
+
+ # Log secret changes in all other namespaces at the Metadata level.
- level: Metadata
resources:
- group: "" # core API group
- resources: ["secrets", "configmaps"]
+ resources: ["secrets"]
# Log all other resources in core and extensions at the Request level.
- level: Request
diff --git a/files/etc/kubernetes/psa.yaml b/files/etc/kubernetes/psa.yaml
new file mode 100644
index 0000000..fe13d52
--- /dev/null
+++ b/files/etc/kubernetes/psa.yaml
@@ -0,0 +1,27 @@
+apiVersion: apiserver.config.k8s.io/v1
+kind: AdmissionConfiguration
+plugins:
+- name: PodSecurity
+ configuration:
+    apiVersion: pod-security.admission.config.k8s.io/v1
+ kind: PodSecurityConfiguration
+ defaults:
+ enforce: "restricted"
+ enforce-version: "latest"
+ audit: "restricted"
+ audit-version: "latest"
+ warn: "restricted"
+ warn-version: "latest"
+ exemptions:
+ usernames: []
+ runtimeClasses: []
+ namespaces: [kube-system, cis-operator-system]
+- name: EventRateLimit
+ configuration:
+ apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+ kind: Configuration
+ limits:
+ - burst: 20000
+ qps: 5000
+ type: Server
+ path: ""
diff --git a/meta/main.yml b/meta/main.yml
index 5968623..ad6ee72 100644
--- a/meta/main.yml
+++ b/meta/main.yml
@@ -6,11 +6,13 @@ galaxy_info:
galaxy_tags: []
license: GPL2
platforms:
- - name: CentOS
- version:
- - 7
- - 8
- - name: RedHat
- version:
- - 7
- - 8
+ - name: CentOS
+ version:
+ - 7
+ - 8
+ - 9
+ - name: RedHat
+ version:
+ - 7
+ - 8
+ - 9
diff --git a/tasks/Debian.yml b/tasks/Debian.yml
index 9952f0a..c4b91da 100644
--- a/tasks/Debian.yml
+++ b/tasks/Debian.yml
@@ -1,21 +1,21 @@
---
-- name: add docker apt key
- apt_key:
+- name: Add docker apt key
+ ansible.builtin.apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
when:
- docker_ver == "docker_ce"
-- name: add docker repository
- apt_repository:
+- name: Add docker repository
+ ansible.builtin.apt_repository:
repo: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable'
state: present
- update_cache: yes
+ update_cache: true
when:
- docker_ver == "docker_ce"
- name: "Ensure GRUB_CMDLINE_LINUX is updated"
- lineinfile:
+ ansible.builtin.lineinfile:
dest: /etc/default/grub
regexp: '^(GRUB_CMDLINE_LINUX=".*)"$'
line: '\1 cgroup_enable=memory swapaccount=1"'
@@ -24,12 +24,12 @@
- not docker_installed.stat.exists
- name: "Update grub.conf"
- command: update-grub
+ ansible.builtin.command: update-grub
when:
- not docker_installed.stat.exists
- name: "Ensure DEFAULT_FORWARD_POLICY in /etc/default/ufw is updated"
- lineinfile:
+ ansible.builtin.lineinfile:
dest: /etc/default/ufw
regexp: '^(DEFAULT_FORWARD_POLICY=").*"$'
line: '\1ACCEPT"'
@@ -38,11 +38,11 @@
tags: [docker,firewall]
# Need Certificat ? Only in local
-#- name: "Add docker port 2376/TCP "
-# ufw: rule=allow port=2376 proto=tcp
-# notify: reload ufw
-# tags: [docker,firewall]
+# - name: "Add docker port 2376/TCP "
+# ufw: rule=allow port=2376 proto=tcp
+# notify: reload ufw
+# tags: [docker,firewall]
-#- name: "Start UFW rules"
-# service: name=ufw state=started
-# tags: [docker,firewall]
+# - name: "Start UFW rules"
+# service: name=ufw state=started
+# tags: [docker,firewall]
diff --git a/tasks/RedHat.yml b/tasks/RedHat.yml
index 7eaf639..bcbf718 100644
--- a/tasks/RedHat.yml
+++ b/tasks/RedHat.yml
@@ -1,59 +1,159 @@
---
-#- name: Add kubernetes repository
-# yumrepo:
-# name: kubernetes
-# description: "Kubernetes Repository"
-# baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-$releasever-x86_64
-# gpgcheck: yes
-# enabled: yes
-# gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg
-# state: present
+# - name: Add kubernetes repository
+# yumrepo:
+# name: kubernetes
+# description: "Kubernetes Repository"
+# baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-$releasever-x86_64
+# gpgcheck: yes
+# enabled: true
+# gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg
+# state: present
-- name: Add Official kubernetes's repo
- template:
- src: "etc/yum.repos.d/kubernetes.repo.j2"
- dest: "/etc/yum.repos.d/kubernetes.repo"
- group: root
- owner: root
- mode: 0644
+# - name: Add Official kubernetes's repo
+# ansible.builtin.template:
+# src: "etc/yum.repos.d/kubernetes.repo.j2"
+# dest: "/etc/yum.repos.d/kubernetes.repo"
+# group: root
+# owner: root
+# mode: 0644
+# when:
+# - not ansible_machine == "armv7l"
+# - not ansible_machine == "armv6l"
+# - kubernetes_cri != "k3s"
+
+- name: Add official Kubernetes repo on servers
+ ansible.builtin.yum_repository:
+ name: kubernetes
+ description: Kubernetes
+ baseurl: "https://pkgs.k8s.io/core:/stable:/v{{ kubernetes_version | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') }}/rpm/"
+ enabled: true
+ gpgcheck: true
+ repo_gpgcheck: true
+ gpgkey: "https://pkgs.k8s.io/core:/stable:/v{{ kubernetes_version | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') }}/rpm/repodata/repomd.xml.key"
+ exclude: kubelet kubeadm
+ become: true
when:
- not ansible_machine == "armv7l"
- not ansible_machine == "armv6l"
+ - kubernetes_server|bool
- kubernetes_cri != "k3s"
-- name: Register kubernetes firewalld service
- template:
- src: "etc/firewalld/services/kubernetes.xml.j2"
- dest: "/etc/firewalld/services/kubernetes.xml"
- group: root
- owner: root
- mode: 0644
- register: need_firewalld_reload
+- name: Add official Kubernetes repo for desktops
+ ansible.builtin.yum_repository:
+ name: kubernetes
+ description: Kubernetes
+ baseurl: "https://pkgs.k8s.io/core:/stable:/v{{ kubernetes_version | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') }}/rpm/"
+ enabled: true
+ gpgcheck: true
+ repo_gpgcheck: true
+ gpgkey: "https://pkgs.k8s.io/core:/stable:/v{{ kubernetes_version | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') }}/rpm/repodata/repomd.xml.key"
+ exclude: kubelet kubeadm kubectl
+ become: true
when:
- - kubernetes_server|bool
+ - not ansible_machine == "armv7l"
+ - not ansible_machine == "armv6l"
+ - not kubernetes_server|bool
-#- name: Reload firewalld configuration
-# service:
-# name: firewalld
-# state: reloaded
-# enabled: yes
-# when:
-# - kubernetes_server|bool
+# - name: Redhat | Installing K8s Packages
+# ansible.builtin.package:
+# name:
+# - kubectl
+# - kubelet
+# - kubeadm
+# - iproute-tc
+# - ipvsadm
+# state: present
+# disable_excludes: kubernetes
+# become: true
+# register: result
+# until: result is successful
-- name: reload firewalld to refresh service list
- command: firewall-cmd --reload
- when:
- - need_firewalld_reload is changed
- - kubernetes_server|bool
+# - name: Register kubernetes firewalld service
+# ansible.builtin.template:
+# src: "etc/firewalld/services/kubernetes.xml.j2"
+# dest: "/etc/firewalld/services/kubernetes.xml"
+# group: root
+# owner: root
+# mode: 0644
+# register: need_firewalld_reload
+# when:
+# - kubernetes_server|bool
+#
+# - name: Reload firewalld configuration
+# ansible.builtin.service:
+# name: firewalld
+# state: reloaded
+# enabled: true
+# when:
+# - kubernetes_server|bool
+# - need_firewalld_reload is changed
+#
+## Define interface
+# - name: Open Firewalld
+# ansible.posix.firewalld:
+# zone: external
+# service: kubernetes
+# permanent: true
+# state: enabled
+# immediate: true
+# when:
+## - need_firewall|bool
+## - firewall_name == "firewalld"
+# - kubernetes_server|bool
-# Définir interface
-- name: Open Firewalld
- firewalld:
- service: kubernetes
+# - name: Create kubernetes firewalld zone
+# ansible.posix.firewalld:
+# zone: kubernetes
+# permanent: true
+# state: present
+# when:
+# - kubernetes_server|bool
+- name: Add kubernetes networks to trusted firewalld zone
+ ansible.posix.firewalld:
+# zone: kubernetes
+ zone: trusted
permanent: true
state: enabled
- immediate: true
+ source: "{{ item }}"
when:
-# - need_firewall|bool
-# - firewall_name == "firewalld"
- kubernetes_server|bool
+ with_items:
+ - "{{ kubernetes_pods_network }}"
+ - "{{ kubernetes_svc_network }}"
+
+- name: Add kubernetes networks to trusted firewalld zone
+ ansible.posix.firewalld:
+# zone: kubernetes
+ zone: trusted
+ permanent: true
+ state: enabled
+ source: "{{ item }}"
+ when:
+ - kubernetes_server|bool
+ - kubernetes_interface is defined
+# - false
+ with_items:
+ - "{{ (lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.network + '/' + lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.netmask) | ansible.utils.ipaddr('net') }}"
+
+- name: Install kubernetes tools
+ ansible.builtin.dnf:
+ name: "{{ kubernetes_package_name }}"
+ enablerepo: "kubernetes"
+ state: present
+ update_cache: true
+ disable_excludes: kubernetes
+# notify: Restart kubelet
+ when:
+ - ansible_pkg_mgr == "dnf"
+ - (not kubernetes_server|bool) or ( kubernetes_server|bool and kubernetes_cri != "k3s")
+
+- name: Install kubernetes tools
+ ansible.builtin.yum:
+ name: "{{ kubernetes_package_name }}"
+ enablerepo: "kubernetes"
+ state: present
+ update_cache: true
+# notify: Restart kubelet
+ when:
+ - ansible_pkg_mgr == "yum"
+ - (not kubernetes_server|bool) or ( kubernetes_server|bool and kubernetes_cri != "k3s")
diff --git a/tasks/cluster_k3s.yml b/tasks/cluster_k3s.yml
index 9727eb0..afe4024 100644
--- a/tasks/cluster_k3s.yml
+++ b/tasks/cluster_k3s.yml
@@ -1,19 +1,46 @@
---
- name: Install Wireguard
- include_role:
+ ansible.builtin.include_role:
name: wireguard
-# when:
+ when:
# - kubernetes_cni == "wireguard"
+ - "'Vpn' not in group_names"
+
+- name: Import Rancher key
+ ansible.builtin.rpm_key:
+ state: present
+ key: https://rpm.rancher.io/public.key
+ when:
+ - ansible_os_family == "RedHat"
+
+- name: Install the k3s-selinux rpm from a remote repo for yum distro
+ ansible.builtin.yum:
+ name: "https://github.com/k3s-io/k3s-selinux/releases/download/v1.6.stable.1/k3s-selinux-1.6-1.el7.noarch.rpm"
+ state: present
+ when:
+ - ansible_pkg_mgr == "yum"
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_major_version == '7'
+
+- name: Install the k3s-selinux rpm from a remote repo for dnf distro
+ ansible.builtin.dnf:
+ name: "https://github.com/k3s-io/k3s-selinux/releases/download/v1.6.stable.1/k3s-selinux-1.6-1.el{{ ansible_distribution_major_version }}.noarch.rpm"
+ state: present
+ when:
+ - ansible_pkg_mgr == "dnf"
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_major_version >= '8'
- name: Check if /usr/local/bin/k3s already existe
- stat:
+ ansible.builtin.stat:
path: /usr/local/bin/k3s
register: k3s_bin
+ check_mode: false
changed_when: False
-- name: retreive k3s binary for x86_64
- get_url:
- url: "https://github.com/rancher/k3s/releases/download/v1.20.2%2Bk3s1/k3s"
+- name: Retrieve k3s binary for x86_64
+ ansible.builtin.get_url:
+ url: "https://github.com/rancher/k3s/releases/download/v{{ kubernetes_k3s_version | urlencode }}/k3s"
dest: "/usr/local/bin/k3s"
group: root
owner: root
@@ -22,9 +49,9 @@
- not k3s_bin.stat.exists
- ansible_machine == "x86_64"
-- name: retreive k3s binary for arm64
- get_url:
- url: "https://github.com/rancher/k3s/releases/download/v1.20.2%2Bk3s1/k3s-arm64"
+- name: Retrieve k3s binary for arm64
+ ansible.builtin.get_url:
+ url: "https://github.com/rancher/k3s/releases/download/v{{ kubernetes_k3s_version | urlencode }}/k3s-arm64"
dest: "/usr/local/bin/k3s"
group: root
owner: root
@@ -33,9 +60,9 @@
- not k3s_bin.stat.exists
- ansible_machine == "arm64"
-- name: retreive k3s binary for armv6/armv7
- get_url:
- url: "https://github.com/rancher/k3s/releases/download/v1.20.2%2Bk3s1/k3s-armhf"
+- name: Retrieve k3s binary for armv6/armv7
+ ansible.builtin.get_url:
+ url: "https://github.com/rancher/k3s/releases/download/v{{ kubernetes_k3s_version | urlencode }}/k3s-armhf"
dest: "/usr/local/bin/k3s"
group: root
owner: root
@@ -45,7 +72,7 @@
- (ansible_machine == "armv7l") or (ansible_machine == "armv6l")
- name: Create tools link
- file:
+ ansible.builtin.file:
src: "k3s"
dest: "/usr/local/bin/{{ item }}"
owner: root
@@ -56,49 +83,225 @@
- "crictl"
- "ctr"
-# Manque kubernetes_server_token, kubernetes_master url
+- name: Create logical volume for k3s
+ when:
+ - kubernetes_lvm|bool
+ block:
+ - name: Create thin volumes for k3s
+ community.general.lvol:
+ vg: "{{ item.vg }}"
+ lv: "{{ item.name }}"
+ thinpool: kubernetes
+ size: "{{ item.size }}"
+ with_items:
+ - { name: var_lib_k3s, vg: vg_sys, size: 10g, mount_point: /var/lib/rancher/k3s, mount_opts: "discard"}
-- name: Deploy systemd service
- template:
- src: "etc/systemd/system/{{ item }}.j2"
- dest: "/etc/systemd/system/{{ item }}"
+ - name: Create file system on containerd lv
+ community.general.filesystem:
+ fstype: ext4
+ dev: "/dev/{{ item.vg }}/{{ item.name }}"
+ with_items:
+ - { name: var_lib_k3s, vg: vg_sys, size: 10g, mount_point: /var/lib/rancher/k3s, mount_opts: "discard"}
+
+ - name: Mount logical volumes
+ ansible.posix.mount:
+ name: "{{ item.mount_point }}"
+ src: "/dev/{{ item.vg }}/{{ item.name }}"
+ fstype: ext4
+ opts: "{{ item.mount_opts }}"
+ state: mounted
+ with_items:
+ - { name: var_lib_k3s, vg: vg_sys, size: 10g, mount_point: /var/lib/rancher/k3s, mount_opts: "discard"}
+
+- name: Ensure protect-kernel-defaults is set
+ ansible.posix.sysctl:
+ name: "{{ item.name }}"
+ value: "{{ item.value }}"
+ sysctl_file: /etc/sysctl.d/90-kubelet.conf
+ reload: true
+ with_items:
+ - { name: "vm.panic_on_oom", value: "0" }
+ - { name: "vm.overcommit_memory", value: "1" }
+ - { name: "kernel.panic", value: "10" }
+ - { name: "kernel.panic_on_oops", value: "1" }
+ when:
+ - kubernetes_server|bool
+
+- name: /etc/kubernetes directory
+ ansible.builtin.file:
+ path: "/etc/kubernetes"
+ state: directory
owner: root
group: root
- mode: 0600
- with_items:
- - "k3s.service"
- - "k3s.service.env"
+ mode: 0755
when:
- - ansible_service_mgr == "systemd"
+ - kubernetes_master|bool
-- name: Create thin volumes for k3s
- lvol:
- vg: "{{ item.vg }}"
- lv: "{{ item.name }}"
- thinpool: kubernetes
- size: "{{ item.size }}"
- with_items:
- - { name: var_lib_k3s, vg: vg_sys, size: 10g, mount_point: /var/lib/rancher/k3s, mount_opts: "discard"}
+- name: Configure Pod Security
+ ansible.builtin.copy:
+ src: "etc/kubernetes/psa.yaml"
+ dest: "/etc/kubernetes/psa.yaml"
+ group: root
+ owner: root
+ mode: 0644
+ when:
+ - kubernetes_master|bool
+
+- name: Audit policies directory
+ ansible.builtin.file:
+ path: "/etc/kubernetes/policies"
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+ when:
+ - kubernetes_master|bool
+
+- name: Configure audit policy
+ ansible.builtin.copy:
+ src: "etc/kubernetes/policies/audit-policy.yaml"
+ dest: "/etc/kubernetes/policies/audit-policy.yaml"
+ group: root
+ owner: root
+ mode: 0644
+ when:
+ - kubernetes_master|bool
+
+# Check controllers
+- name: Check if /etc/rancher/k3s/k3s.yaml already exists
+ ansible.builtin.stat:
+ path: /etc/rancher/k3s/k3s.yaml
+ register: st
+ check_mode: false
+ changed_when: False
+ when:
+ - kubernetes_master|bool
+
+- name: Create KubernetesMasterConfigured group
+ ansible.builtin.group_by:
+ key: KubernetesMasterConfigured_{{ kubernetes_cluster_name }}
+ check_mode: false
+ when:
+ - kubernetes_master|bool
+ - st.stat.exists
+
+# First controller
+- name: Configure first controller
+# run_once: true
+ block:
+ - name: Create k3s directories on master nodes
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+ with_items:
+ - "/etc/rancher"
+ - "/etc/rancher/k3s"
+ - "/etc/rancher/k3s/config.yaml.d"
+ - "/var/lib/rancher"
+ - "/var/lib/rancher/k3s"
+ - "/var/lib/rancher/k3s/server"
+ - "/var/lib/rancher/k3s/server/manifests"
+ when:
+ - kubernetes_master|bool
+
+ - name: Create k3s directories on all nodes
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+ with_items:
+ - "/var/lib/rancher/k3s/storage"
+
+# semanage fcontext -a -t container_file_t "/var/lib/rancher/k3s/storage(/.*)?"
+ - name: Allow K3S local-path provisioner to create directories in /var/lib/rancher/k3s/storage
+ community.general.sefcontext:
+ target: '/var/lib/rancher/k3s/storage(/.*)?'
+ setype: container_file_t
+ state: present
+ - name: Apply new SELinux file context to filesystem
+ ansible.builtin.command: restorecon -R /var/lib/rancher/k3s/storage/
+
+ - name: Deploy Network Policies
+ ansible.builtin.template:
+ src: "{{ item }}.j2"
+ dest: "/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - "var/lib/rancher/k3s/server/manifests/np-00-intra-namespace.yaml"
+ - "var/lib/rancher/k3s/server/manifests/np-01-default-network-dns-policy.yaml"
+ - "var/lib/rancher/k3s/server/manifests/np-03-metrics-server-traefik.yaml"
+ when:
+ - kubernetes_master|bool
+
+ - name: Deploy systemd service
+ ansible.builtin.template:
+ src: "{{ item }}.j2"
+ dest: "/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - "etc/systemd/system/k3s.service"
+ - "etc/systemd/system/k3s.service.env"
+ - "etc/rancher/k3s/config.yaml"
+ when:
+ - ansible_service_mgr == "systemd"
+
+ - name: Reload systemd
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+ - name: Enable k3s on boot
+ ansible.builtin.service:
+ name: k3s
+ state: started
+ enabled: true
+
+ - name: Wait for k3s.yaml
+      ansible.builtin.wait_for:
+ path: /etc/rancher/k3s/k3s.yaml
+
+ - name: Wait for token
+      ansible.builtin.wait_for:
+ path: /var/lib/rancher/k3s/server/token
+
+ - name: Add {{ ansible_hostname }} to KubernetesMasterConfigured group
+ ansible.builtin.group_by:
+ key: KubernetesMasterConfigured_{{ kubernetes_cluster_name }}
+ check_mode: false
+
+ when:
+ - kubernetes_master|bool
+ - vars['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is not defined
+
+# chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt
+
+# Manque kubernetes_server_token, kubernetes_master url
+
+# - name: Deploy systemd service
+# ansible.builtin.template:
+# src: "etc/systemd/system/{{ item }}.j2"
+# dest: "/etc/systemd/system/{{ item }}"
+# owner: root
+# group: root
+# mode: 0600
+# with_items:
+# - "k3s.service"
+# - "k3s.service.env"
+# when:
+# - ansible_service_mgr == "systemd"
-- name: create file system on containerd lv
- filesystem:
- fstype: ext4
- dev: "/dev/{{ item.vg }}/{{ item.name }}"
- with_items:
- - { name: var_lib_k3s, vg: vg_sys, size: 10g, mount_point: /var/lib/rancher/k3s, mount_opts: "discard"}
-- name: mount logical volumes
- mount:
- name: "{{ item.mount_point }}"
- src: "/dev/{{ item.vg }}/{{ item.name }}"
- fstype: ext4
- opts: "{{ item.mount_opts }}"
- state: mounted
- with_items:
- - { name: var_lib_k3s, vg: vg_sys, size: 10g, mount_point: /var/lib/rancher/k3s, mount_opts: "discard"}
- name: Enable k3s on boot
- service:
+ ansible.builtin.service:
name: k3s
state: started
- enabled: yes
+ enabled: true
diff --git a/tasks/cluster_kubeadm.yml b/tasks/cluster_kubeadm.yml
index c15325d..c11bf3a 100644
--- a/tasks/cluster_kubeadm.yml
+++ b/tasks/cluster_kubeadm.yml
@@ -1,27 +1,46 @@
---
- name: Install Containerd
- include_role:
+ ansible.builtin.include_role:
name: containerd
when:
- kubernetes_cri == "containerd"
- #register: kubernetes_cri_changed
+ # register: kubernetes_cri_changed
- name: Install CRI-O
- include_role:
+ ansible.builtin.include_role:
name: cri-o
when:
- kubernetes_cri == "cri-o"
- #register: kubernetes_cri_changed
+ # register: kubernetes_cri_changed
-#- name: Restart kubelet after kubernetes cri installation
-# service:
-# name: kubelet
-# status: restarted
-# when:
-# - kubernetes_cri_changed is changed
+# - name: Restart kubelet after kubernetes cri installation
+# ansible.builtin.service:
+# name: kubelet
+# status: restarted
+# when:
+# - kubernetes_cri_changed is changed
+
+- name: Configure NetworkManager for Calico
+ ansible.builtin.copy:
+ src: "etc/NetworkManager/conf.d/calico.conf"
+ dest: "/etc/NetworkManager/conf.d/calico.conf"
+ group: root
+ owner: root
+ mode: 0644
+ when:
+ - kubernetes_network == "calico"
+ - ansible_os_family == "RedHat"
+ register: kubernetes_network_networkmanager_changed
+
+- name: Reload NetworkManager after Calico configuration
+ ansible.builtin.service:
+ name: NetworkManager
+    state: reloaded
+ when:
+ - kubernetes_network_networkmanager_changed is changed
- name: Configuring IPVS kernel module to be load on boot
- template:
+ ansible.builtin.template:
src: "etc/modules-load.d/ipvs.conf.j2"
dest: "/etc/modules-load.d/ipvs.conf"
group: root
@@ -31,7 +50,7 @@
- kubernetes_kubeproxy_mode == "ipvs"
- name: Load IPVS kernel module for EL7
- modprobe:
+ community.general.modprobe:
name: "{{ item }}"
state: present
with_items:
@@ -47,7 +66,7 @@
- ansible_distribution_major_version == '7'
- name: Load IPVS kernel module for EL8
- modprobe:
+ community.general.modprobe:
name: "{{ item }}"
state: present
with_items:
@@ -59,10 +78,10 @@
when:
- kubernetes_kubeproxy_mode == "ipvs"
- ansible_os_family == "RedHat"
- - ansible_distribution_major_version == '8'
+ - ansible_distribution_major_version >= '8'
- name: Create thin volumes for kubernetes
- lvol:
+ community.general.lvol:
vg: "{{ item.vg }}"
lv: "{{ item.name }}"
thinpool: kubernetes
@@ -74,8 +93,8 @@
when:
- kubernetes_master|bool
-- name: create file system on containerd lv
- filesystem:
+- name: Create file system on containerd lv
+ community.general.filesystem:
fstype: ext4
dev: "/dev/{{ item.vg }}/{{ item.name }}"
with_items:
@@ -85,8 +104,8 @@
when:
- kubernetes_master|bool
-- name: mount logical volumes
- mount:
+- name: Mount logical volumes
+ ansible.posix.mount:
name: "{{ item.mount_point }}"
src: "/dev/{{ item.vg }}/{{ item.name }}"
fstype: ext4
@@ -101,14 +120,14 @@
- kubernetes_master|bool
- name: Ensuring /var/lib/etcd/lost+found Folder does not exists
- file:
+ ansible.builtin.file:
path: "/var/lib/etcd/lost+found"
state: "absent"
when:
- partition_formated is changed
- name: Secure etcd directory
- file:
+ ansible.builtin.file:
path: "/var/lib/etcd"
state: directory
owner: root
@@ -118,7 +137,7 @@
- kubernetes_master|bool
- name: Ensuring /etc/systemd/system/kubelet.service.d Folder Exists
- file:
+ ansible.builtin.file:
path: "/etc/systemd/system/kubelet.service.d"
state: "directory"
group: root
@@ -128,7 +147,7 @@
- ansible_service_mgr == "systemd"
- name: Configure kubelet service
- template:
+ ansible.builtin.template:
src: "etc/{{ item }}.j2"
dest: "/etc/{{ item }}"
group: root
@@ -140,8 +159,35 @@
when:
- ansible_service_mgr == "systemd"
+- name: Configure kubelet service for CRI-O
+ ansible.builtin.template:
+ src: "etc/{{ item }}.j2"
+ dest: "/etc/{{ item }}"
+ group: root
+ owner: root
+ mode: 0644
+ with_items:
+ - "systemd/system/kubelet.service.d/11-cgroups.conf"
+ when:
+ - ansible_service_mgr == "systemd"
+ - kubernetes_cri == "cri-o"
+
+- name: Enable Swap for kubelet service
+ ansible.builtin.template:
+ src: "etc/{{ item }}.j2"
+ dest: "/etc/{{ item }}"
+ group: root
+ owner: root
+ mode: 0644
+ with_items:
+ - "systemd/system/kubelet.service.d/20-allow-swap.conf"
+ when:
+ - ansible_service_mgr == "systemd"
+ - kubernetes_swap_enabled is defined
+ - kubernetes_swap_enabled|bool
+
- name: Configure kubelet service
- template:
+ ansible.builtin.template:
src: "etc/{{ item }}.j2"
dest: "/etc/{{ item }}"
group: root
@@ -153,107 +199,133 @@
- not ansible_service_mgr == "systemd"
- name: Enable kubelet on boot
- service:
+ ansible.builtin.service:
name: kubelet
state: started
- enabled: yes
+ enabled: true
+
+- name: Audit policies directory
+ ansible.builtin.file:
+ path: "/etc/kubernetes/policies"
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+ when:
+ - kubernetes_master|bool
+
+# https://v1-17.docs.kubernetes.io/docs/tasks/debug-application-cluster/falco/
+# https://github.com/falcosecurity/falco/blob/master/rules/k8s_audit_rules.yaml
+# Or retrieve these rules for use with falco
+
+- name: Configure audit policy
+ ansible.builtin.copy:
+ src: "etc/kubernetes/policies/audit-policy.yaml"
+ dest: "/etc/kubernetes/policies/audit-policy.yaml"
+ group: root
+ owner: root
+ mode: 0644
+ when:
+ - kubernetes_master|bool
# First controler
- name: Check if /etc/kubernetes/admin.conf already existe
- stat:
+ ansible.builtin.stat:
path: /etc/kubernetes/admin.conf
register: st
+ check_mode: false
changed_when: False
- name: Create KubernetesMasterConfigured group
- group_by:
- key: KubernetesMasterConfigured
+ ansible.builtin.group_by:
+ key: KubernetesMasterConfigured_{{ kubernetes_cluster_name }}
+ check_mode: false
when:
- st.stat.exists
- name: Retreive kubeadm Major version
- shell: set -o pipefail && kubeadm version | sed 's/.*{Major:"\([0-9]\)".*/\1/'
+ ansible.builtin.shell: set -o pipefail && kubeadm version | sed 's/.*{Major:"\([0-9]\)".*/\1/'
register: kubeadm_version_major
+ check_mode: false
changed_when: False
- name: Retreive kubeadm Minor version
- shell: set -o pipefail && kubeadm version | sed -e 's/.* Minor:"\([0-9]*\)".*/\1/'
+ ansible.builtin.shell: set -o pipefail && kubeadm version | sed -e 's/.* Minor:"\([0-9]*\)".*/\1/'
register: kubeadm_version_minor
+ check_mode: false
changed_when: False
- name: Defined a default lb_kubemaster
- set_fact:
- lb_kubemaster: "{{ groups['KubernetesMasters'][0] }}"
+ ansible.builtin.set_fact:
+ lb_kubemaster: "{{ groups['KubernetesMasters_' ~ kubernetes_cluster_name][0] }}"
when:
- lb_kubemaster is undefined
# - groups['KubernetesMasters'] | length > 1
changed_when: False
+ check_mode: false
-- name: Deploy initial kubeadm config
- template:
- src: kubeadm-config.yaml.j2
- dest: /root/kubeadm-config.yaml
- owner: root
- group: root
- mode: 0600
- when:
- - groups['KubernetesMasterConfigured'] is not defined
- - groups['KubernetesMasters'][0] == ansible_hostname
+- name: Deploy first controller
+ block:
+ - name: Deploy initial kubeadm config
+ ansible.builtin.template:
+ src: kubeadm-config.yaml.j2
+ dest: /root/kubeadm-config.yaml
+ owner: root
+ group: root
+ mode: 0600
-- name: Init Kubernetes on {{ groups['KubernetesMasters'][0] }}
- command: kubeadm init --config=/root/kubeadm-config.yaml
- when:
- - groups['KubernetesMasterConfigured'] is not defined
- - groups['KubernetesMasters'][0] == ansible_hostname
+ - name: Init Kubernetes on {{ groups['KubernetesMasters_' ~ kubernetes_cluster_name][0] }}
+ ansible.builtin.command: kubeadm init --config=/root/kubeadm-config.yaml
+
+ - name: Add {{ ansible_hostname }} to KubernetesMasterConfigured group
+ ansible.builtin.group_by:
+ key: KubernetesMasterConfigured_{{ kubernetes_cluster_name }}
+ check_mode: false
-- name: Add {{ ansible_hostname }} to KubernetesMasterConfigured group
- group_by:
- key: KubernetesMasterConfigured
when:
- - groups['KubernetesMasterConfigured'] is not defined
- - groups['KubernetesMasters'][0] == ansible_hostname
+ - groups['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is not defined
+ - groups['KubernetesMasters_' ~ kubernetes_cluster_name][0] == ansible_hostname
# End of first controler
- name: Test if server node already included
- command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes {{ ansible_hostname | lower }}
+ ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes {{ ansible_hostname | lower }}
delegate_to: "{{ lb_kubemaster }}"
register: server_enrolled
changed_when: False
ignore_errors: yes
- when:
- - groups['KubernetesMasterConfigured'] is not defined
+ check_mode: false
-#- name: Deploy kubeadm config
-# template:
-# src: kubeadm-config.yaml.j2
-# dest: /root/kubeadm-config.yaml
-# owner: root
-# group: root
-# mode: 600
-# when:
-# - not groups['KubernetesMasters'][0] == ansible_hostname
-# - server_enrolled.rc == 1
+# - name: Deploy kubeadm config
+# ansible.builtin.template:
+# src: kubeadm-config.yaml.j2
+# dest: /root/kubeadm-config.yaml
+# owner: root
+# group: root
+# mode: 600
+# when:
+# - not groups['KubernetesMasters'][0] == ansible_hostname
+# - server_enrolled.rc == 1
- name: Retreive certificats key on {{ lb_kubemaster }}
- shell: set -o pipefail && kubeadm init phase upload-certs --upload-certs | grep -v upload-certs
+ ansible.builtin.shell: set -o pipefail && kubeadm init phase upload-certs --upload-certs | grep -v upload-certs
register: kubernetes_certificateKey
+ check_mode: false
delegate_to: "{{ lb_kubemaster }}"
when:
- - groups['KubernetesMasterConfigured'] is not defined
- server_enrolled.rc == 1
- kubernetes_master|bool
- name: Retreive token on "{{ lb_kubemaster }}"
- command: kubeadm token create
+ ansible.builtin.command: kubeadm token create
register: kubetoken
delegate_to: "{{ lb_kubemaster }}"
+ check_mode: false
when:
- - groups['KubernetesMasterConfigured'] is not defined
- server_enrolled.rc == 1
- name: Retreive hash certificat
- shell: >
+ ansible.builtin.shell: >
set -o pipefail &&
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt |
openssl rsa -pubin -outform der 2>/dev/null |
@@ -261,23 +333,21 @@
sed 's/^.* //'
register: cacerthash
delegate_to: "{{ lb_kubemaster }}"
+ check_mode: false
when:
- - groups['KubernetesMasterConfigured'] is not defined
- server_enrolled.rc == 1
- name: Deploy kubeadm config
- template:
+ ansible.builtin.template:
src: kubeadm-config.yaml.j2
dest: /root/kubeadm-config.yaml
owner: root
group: root
mode: 0600
when:
- - groups['KubernetesMasterConfigured'] is not defined
- server_enrolled.rc == 1
- name: Join '{{ ansible_hostname }}' to Kubernetes cluster
- command: kubeadm join --config=/root/kubeadm-config.yaml
+ ansible.builtin.command: kubeadm join --config=/root/kubeadm-config.yaml
when:
- - groups['KubernetesMasterConfigured'] is not defined
- server_enrolled.rc == 1
diff --git a/tasks/install_server.yml b/tasks/install_server.yml
index 0dac5ad..a9d0b9e 100644
--- a/tasks/install_server.yml
+++ b/tasks/install_server.yml
@@ -1,52 +1,53 @@
---
+- name: Include vars for non-tainted Kubernetes masters
+ ansible.builtin.include_vars: masters.yml
+ when:
+ - kubernetes_master|bool
+ - not kubernetes_master_taint|bool
+
+- name: Add master to KubernetesMasters_ClusterName group
+ ansible.builtin.group_by:
+ key: KubernetesMasters_{{ kubernetes_cluster_name }}
+ check_mode: false
+ when:
+ - "'KubernetesMasters' in group_names"
+
+- name: Add node to KubernetesNodes_ClusterName group
+ ansible.builtin.group_by:
+ key: KubernetesNodes_{{ kubernetes_cluster_name }}
+ check_mode: false
+ when:
+ - "'KubernetesNodes' in group_names"
+
+
- name: Disable SWAP since kubernetes can't work with swap enabled (1/2)
- command: swapoff -a
+ ansible.builtin.command: swapoff -a
changed_when: false
- name: Remove swapfile from /etc/fstab (2/2)
- mount:
+ ansible.posix.mount:
name: swap
fstype: swap
state: absent
- name: Create a thin pool for kubernetes
- lvol:
+ community.general.lvol:
vg: vg_sys
thinpool: kubernetes
- size: 20g
+ size: "{{ lv_kubernetes_size | default('20g') }}"
-# Install API loadbalancer
-- include_tasks: "load_balancer.yml"
- when:
- - kubernetes_master|bool
- - groups['KubernetesMasters'] | length > 1
-
-- name: Audit policies directory
- file:
- path: "/etc/kubernetes/policies"
- state: directory
- owner: root
- group: root
- mode: 0700
- when:
- - kubernetes_master|bool
-
-- name: Configure audit policy
- copy:
- src: "etc/kubernetes/policies/audit-policy.yaml"
- dest: "/etc/kubernetes/policies/audit-policy.yaml"
- group: root
- owner: root
- mode: 0644
- when:
- - kubernetes_master|bool
+## Install API loadbalancer
+# - ansible.builtin.include_tasks: "load_balancer.yml"
+# when:
+# - kubernetes_master|bool
+# - groups['KubernetesMasters'] | length > 1
- name: Kubernetes cluster with kubeadm
- include_tasks: "cluster_kubeadm.yml"
+ ansible.builtin.include_tasks: "cluster_kubeadm.yml"
when:
- kubernetes_cri != "k3s"
- name: Kubernetes cluster with k3s
- include_tasks: "cluster_k3s.yml"
+ ansible.builtin.include_tasks: "cluster_k3s.yml"
when:
- kubernetes_cri == "k3s"
@@ -55,7 +56,7 @@
#
- name: Make /root/.kube directory
- file:
+ ansible.builtin.file:
path: "/root/.kube"
owner: root
group: root
@@ -65,10 +66,10 @@
- kubernetes_master|bool
- name: Copy kubeconfig file from /etc/kubernetes/admin.conf
- copy:
+ ansible.builtin.copy:
src: "/etc/kubernetes/admin.conf"
dest: /root/.kube/config
- remote_src: yes
+ remote_src: true
owner: root
group: root
mode: 0600
@@ -77,10 +78,10 @@
- kubernetes_cri != "k3s"
- name: Copy kubeconfig file from /etc/rancher/k3s/k3s.yaml
- copy:
+ ansible.builtin.copy:
src: "/etc/rancher/k3s/k3s.yaml"
dest: /root/.kube/config
- remote_src: yes
+ remote_src: true
owner: root
group: root
mode: 0600
@@ -88,27 +89,40 @@
- kubernetes_master|bool
- kubernetes_cri == "k3s"
+- name: Make link from /etc/rancher/k3s/k3s.yaml to /etc/kubernetes/admin.conf
+ ansible.builtin.file:
+ src: "/etc/rancher/k3s/k3s.yaml"
+ state: link
+ dest: "/etc/kubernetes/admin.conf"
+ force: true
+ owner: root
+ group: root
+ when:
+ - kubernetes_master|bool
+ - kubernetes_cri == "k3s"
+
#
# Manque autoconfig de .kube/config local
#
-#- name: Fetching CA certificat
-# copy:
-# src: /etc/kubernetes/pki/ca.crt
-# dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
-# when:
-# - kubernetes_master|bigip_pool
+# - name: Fetching CA certificate
+# ansible.builtin.copy:
+# src: /etc/kubernetes/pki/ca.crt
+# dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
+# when:
+# - kubernetes_master|bool
+# kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints --no-headers
- name: Check if a node is still tainted
- command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
+ ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
register: current_taint
+ check_mode: false
when:
- kubernetes_master_taint|bool
-- name: taint the machine if needed
-# command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
- command: kubectl taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/master-
+- name: Taint the machine if needed
+# ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
+ ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/control-plane-
when:
- kubernetes_master_taint|bool
- current_taint.stdout
-
diff --git a/tasks/load_balancer.yml b/tasks/load_balancer.yml
index aa9413d..d557058 100644
--- a/tasks/load_balancer.yml
+++ b/tasks/load_balancer.yml
@@ -1,22 +1,21 @@
---
- name: Install needed packages
- package:
- name: "{{ item }}"
+ ansible.builtin.package:
+ name:
+ - keepalived
+ - curl
state: present
- update_cache: yes
- with_items:
- - keepalived
- - curl
+ update_cache: true
notify: Restart keepalived
- name: Install check_apiserver.sh script for keepalived
- template:
+ ansible.builtin.template:
src: etc/keepalived/check_apiserver.sh.j2
dest: /etc/keepalived/check_apiserver.sh
owner: root
group: root
mode: 0755
- name: Install keepalived config file
- template:
+ ansible.builtin.template:
src: etc/keepalived/keepalived.conf.j2
dest: /etc/keepalived/keepalived.conf
owner: root
@@ -28,7 +27,7 @@
- groups['KubernetesMasters'][0] == ansible_hostname
notify: Restart keepalived
- name: Install keepalived config file
- template:
+ ansible.builtin.template:
src: etc/keepalived/keepalived.conf.j2
dest: /etc/keepalived/keepalived.conf
owner: root
@@ -39,3 +38,6 @@
when:
- not groups['KubernetesMasters'][0] == ansible_hostname
notify: Restart keepalived
+
+- name: Flush handlers
+ ansible.builtin.meta: flush_handlers
diff --git a/tasks/main.yml b/tasks/main.yml
index b57a12b..9283b29 100644
--- a/tasks/main.yml
+++ b/tasks/main.yml
@@ -1,57 +1,65 @@
---
-- name: Include vars for {{ ansible_os_family }}
- include_vars: "{{ ansible_os_family }}.yml"
+- name: Kubernetes Install
+ tags:
+ - kubernetes
+ block:
+ - name: Include vars for {{ ansible_os_family }}
+ ansible.builtin.include_vars: "{{ ansible_os_family }}.yml"
-- name: Define vars for master
- set_fact:
- kubernetes_server: true
- kubernetes_master: true
- kubernetes_master_taint: false
- when:
- - "'KubernetesMasters' in group_names"
- - "'KubernetesNodes' not in group_names"
+ - name: Define vars for master
+ ansible.builtin.set_fact:
+ kubernetes_server: true
+ kubernetes_master: true
+ kubernetes_master_taint: false
+ check_mode: false
+ when:
+ - "'KubernetesMasters' in group_names"
+ - "'KubernetesNodes' not in group_names"
-- name: Define vars for node
- set_fact:
- kubernetes_server: true
- kubernetes_master: false
- kubernetes_master_taint: false
- when:
- - "'KubernetesNodes' in group_names"
- - "'KubernetesMasters' not in group_names"
+ - name: Define vars for node
+ ansible.builtin.set_fact:
+ kubernetes_server: true
+ kubernetes_master: false
+ kubernetes_master_taint: false
+ check_mode: false
+ when:
+ - "'KubernetesNodes' in group_names"
+ - "'KubernetesMasters' not in group_names"
-- name: Define vars for taint master
- set_fact:
- kubernetes_server: true
- kubernetes_master: true
- kubernetes_master_taint: true
- when:
- - "'KubernetesNodes' in group_names"
- - "'KubernetesMasters' in group_names"
+ - name: Define vars for taint master
+ ansible.builtin.set_fact:
+ kubernetes_server: true
+ kubernetes_master: true
+ kubernetes_master_taint: true
+ check_mode: false
+ when:
+ - "'KubernetesNodes' in group_names"
+ - "'KubernetesMasters' in group_names"
-- name: Define vars for tooling
- set_fact:
- kubernetes_sever: false
- when:
- - "'KubernetesMasters' not in group_names"
- - "'KubernetesNodes' not in group_names"
+ - name: Define vars for tooling
+ ansible.builtin.set_fact:
+ kubernetes_server: false
+ check_mode: false
+ when:
+ - "'KubernetesMasters' not in group_names"
+ - "'KubernetesNodes' not in group_names"
-- name: Install kubernetes rules for {{ ansible_os_family }} OS family
- include_tasks: "{{ ansible_os_family }}.yml"
+ - name: Install kubernetes rules for {{ ansible_os_family }} OS family
+ ansible.builtin.include_tasks: "{{ ansible_os_family }}.yml"
-- name: Install kubernetes tools
- package:
- name: "{{ kubernetes_package_name }}"
- state: present
- update_cache: yes
-# notify: Restart kubelet
- when:
- - (not kubernetes_server|bool) or ( kubernetes_server|bool and kubernetes_cri != "k3s")
+ # - name: Install kubernetes tools
+ # ansible.builtin.package:
+ # name: "{{ kubernetes_package_name }}"
+ # state: present
+ # update_cache: true
+ ## notify: Restart kubelet
+ # when:
+ # - (not kubernetes_server|bool) or ( kubernetes_server|bool and kubernetes_cri != "k3s")
-- name: Include kubernetes server rules
- include_tasks: "install_server.yml"
- when:
- - kubernetes_server|bool
+ - name: Include kubernetes server rules
+ ansible.builtin.include_tasks: "install_server.yml"
+ when:
+ - kubernetes_server|bool
-#- name: Install python library for docker
-# package: name="{{ python_openshift_lib }}" state=latest update_cache=yes
+ # - name: Install python library for docker
+ # package: name="{{ python_openshift_lib }}" state=latest update_cache=yes
diff --git a/templates/etc/firewalld/services/kubernetes.xml.j2 b/templates/etc/firewalld/services/kubernetes.xml.j2
index d4d0a53..95f0583 100644
--- a/templates/etc/firewalld/services/kubernetes.xml.j2
+++ b/templates/etc/firewalld/services/kubernetes.xml.j2
@@ -9,6 +9,12 @@
{% if kubernetes_master == true %}
# Kubernetes API server, used by all
+{% endif %}
+{% if kubernetes_cri == "k3s" %}
+# K3S with flannel and wireguard
+
+{% else %}
+{% if kubernetes_master == true %}
# etcd server client API, used by kube-apiserver and etcd
@@ -18,8 +24,8 @@
# kube-controler-manager, used by self
-# ???
-
+# Read-only Kubelet API (Deprecated)
+#
{% else %}
{% endif %}
@@ -35,4 +41,5 @@
{% endif %}
+{% endif %}
diff --git a/templates/etc/kubernetes/audit-webhook-kubeconfig.j2 b/templates/etc/kubernetes/audit-webhook-kubeconfig.j2
new file mode 100644
index 0000000..781d08c
--- /dev/null
+++ b/templates/etc/kubernetes/audit-webhook-kubeconfig.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: falco
+ cluster:
+ server: http://$FALCO_SERVICE_CLUSTERIP:8765/k8s-audit
+contexts:
+- context:
+ cluster: falco
+ user: ""
+ name: default-context
+current-context: default-context
+preferences: {}
+users: []
\ No newline at end of file
diff --git a/templates/etc/rancher/k3s/config.yaml.j2 b/templates/etc/rancher/k3s/config.yaml.j2
new file mode 100644
index 0000000..e55fb12
--- /dev/null
+++ b/templates/etc/rancher/k3s/config.yaml.j2
@@ -0,0 +1,55 @@
+{% if 'Vpn' in group_names %}
+flannel-backend: vxlan
+{% else %}
+flannel-backend: wireguard-native
+{% endif %}
+protect-kernel-defaults: true
+{% if kubernetes_master|bool %}
+secrets-encryption: true
+kube-apiserver-arg:
+ - "enable-admission-plugins=NodeRestriction,AlwaysPullImages,EventRateLimit"
+ - 'admission-control-config-file=/etc/kubernetes/psa.yaml'
+ - 'audit-log-path=/var/log/apiserver/audit.log'
+ - 'audit-policy-file=/etc/kubernetes/policies/audit-policy.yaml'
+ - 'audit-log-maxage=30'
+ - 'audit-log-maxbackup=10'
+ - 'audit-log-maxsize=100'
+# - "request-timeout=300s"
+kube-controller-manager-arg:
+ - 'terminated-pod-gc-threshold=10'
+cluster-cidr:
+ - {{ kubernetes_pods_network }}
+service-cidr:
+ - {{ kubernetes_svc_network }}
+{% if vars['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is not defined %}
+cluster-init: true
+{% else %}
+server: https://{{ lb_kubemaster }}:6443
+token: ${NODE_TOKEN}
+{% endif %}
+{% else %}
+server: https://{{ lb_kubemaster }}:6443
+token: ${NODE_TOKEN}
+{% endif %}
+kubelet-arg:
+ - 'streaming-connection-idle-timeout=5m'
+ - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+{% if false %}
+ - 'feature-gates=NodeSwap=true,CloudDualStackNodeIPs=true'
+{% endif %}
+{% if ansible_os_family == "RedHat" %}
+selinux: true
+{% endif %}
+#embedded-registry: true
+disable:
+ - traefik
+{% if lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address != ansible_host %}
+node-external-ip: {{ ansible_host }}
+{% endif %}
+node-ip: {{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}
+{% if false %}
+# node-external-ip: 1.2.3.4
+#node-label:
+# - "foo=bar"
+# - "something=amazing"
+{% endif %}
diff --git a/templates/etc/sysconfig/kubelet.j2 b/templates/etc/sysconfig/kubelet.j2
index b02129d..92eaebd 100644
--- a/templates/etc/sysconfig/kubelet.j2
+++ b/templates/etc/sysconfig/kubelet.j2
@@ -1,2 +1,2 @@
#https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates
-KUBELET_EXTRA_ARGS="--container-runtime=remote --container-runtime-endpoint={% if kubernetes_cri == "containerd" %}unix:///run/containerd/containerd.sock{% elif kubernetes_cri == "cri-o" %}unix:///var/run/crio/crio.sock{% endif %} --node-ip={{ ansible_default_ipv4.address }}"
+KUBELET_EXTRA_ARGS="--container-runtime=remote --container-runtime-endpoint={% if kubernetes_cri == "containerd" %}unix:///run/containerd/containerd.sock{% elif kubernetes_cri == "cri-o" %}unix:///var/run/crio/crio.sock{% endif %} --node-ip={{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}"
diff --git a/templates/etc/systemd/system/k3s.service.j2 b/templates/etc/systemd/system/k3s.service.j2
index 95ac5ae..206c539 100644
--- a/templates/etc/systemd/system/k3s.service.j2
+++ b/templates/etc/systemd/system/k3s.service.j2
@@ -7,9 +7,9 @@ After=network-online.target
Type=notify
EnvironmentFile=/etc/systemd/system/k3s.service.env
{% if kubernetes_master|bool %}
-ExecStart=/usr/local/bin/k3s server --flannel-backend=wireguard --disable traefik --secrets-encryption
+ExecStart=/usr/local/bin/k3s server
{% else %}
-ExecStart=/usr/local/bin/k3s agent --server https://{{ kubernetes_master }}:6443 --token ${NODE_TOKEN}
+ExecStart=/usr/local/bin/k3s agent
{% endif %}
KillMode=process
Delegate=yes
diff --git a/templates/etc/systemd/system/kubelet.service.d/0-kubelet-extra-args.conf.j2 b/templates/etc/systemd/system/kubelet.service.d/0-kubelet-extra-args.conf.j2
index e6f3596..fde66b0 100644
--- a/templates/etc/systemd/system/kubelet.service.d/0-kubelet-extra-args.conf.j2
+++ b/templates/etc/systemd/system/kubelet.service.d/0-kubelet-extra-args.conf.j2
@@ -1,2 +1,2 @@
[Service]
-Environment=KUBELET_EXTRA_ARGS="--container-runtime=remote --container-runtime-endpoint={% if kubernetes_cri == "containerd" %}unix:///run/containerd/containerd.sock{% elif kubernetes_cri == "cri-o" %}unix:///var/run/crio/crio.sock{% endif %} --node-ip={{ ansible_default_ipv4.address }}"
+Environment=KUBELET_EXTRA_ARGS="--container-runtime=remote --container-runtime-endpoint={% if kubernetes_cri == "containerd" %}unix:///run/containerd/containerd.sock{% elif kubernetes_cri == "cri-o" %}unix:///var/run/crio/crio.sock{% endif %} --node-ip={{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}"
diff --git a/templates/etc/systemd/system/kubelet.service.d/11-cgroups.conf.j2 b/templates/etc/systemd/system/kubelet.service.d/11-cgroups.conf.j2
new file mode 100644
index 0000000..403ae0e
--- /dev/null
+++ b/templates/etc/systemd/system/kubelet.service.d/11-cgroups.conf.j2
@@ -0,0 +1,5 @@
+# https://stackoverflow.com/a/57456786
+# https://stackoverflow.com/questions/57456667/failed-to-get-kubelets-cgroup
+[Service]
+CPUAccounting=true
+MemoryAccounting=true
diff --git a/templates/etc/systemd/system/kubelet.service.d/20-allow-swap.conf.j2 b/templates/etc/systemd/system/kubelet.service.d/20-allow-swap.conf.j2
new file mode 100644
index 0000000..3f54876
--- /dev/null
+++ b/templates/etc/systemd/system/kubelet.service.d/20-allow-swap.conf.j2
@@ -0,0 +1,2 @@
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false"
diff --git a/templates/etc/yum.repos.d/kubernetes.repo.j2 b/templates/etc/yum.repos.d/kubernetes.repo.j2
index 7ac0fdb..b04037a 100644
--- a/templates/etc/yum.repos.d/kubernetes.repo.j2
+++ b/templates/etc/yum.repos.d/kubernetes.repo.j2
@@ -1,7 +1,7 @@
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-{{ ansible_machine }}
-enabled=1
+enabled=0
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
diff --git a/templates/kubeadm-config.yaml.j2 b/templates/kubeadm-config.yaml.j2
index 9a95c08..2adf101 100644
--- a/templates/kubeadm-config.yaml.j2
+++ b/templates/kubeadm-config.yaml.j2
@@ -1,4 +1,4 @@
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
{% if kubetoken is defined %}
bootstrapTokens:
@@ -14,8 +14,9 @@ nodeRegistration:
{% elif kubernetes_cri == "docker" %}
criSocket: "/var/run/docker.sock"
{% endif %}
+ name: {{ ansible_hostname }}
{% if false %}
- name: "ec2-10-100-0-1"
+ imagePullPolicy: IfNotPresent
taints:
- key: "kubeadmNode"
value: "master"
@@ -25,70 +26,47 @@ nodeRegistration:
{% if ansible_service_mgr == "systemd" %}
cgroup-driver: "systemd"
{% endif %}
- container-runtime: "remote"
runtime-request-timeout: "5m"
-{% if kubernetes_cri == "containerd" %}
- container-runtime-endpoint: "unix:///run/containerd/containerd.sock"
-{% elif kubernetes_cri == "cri-o" %}
- container-runtime-endpoint: "unix:///var/run/crio/crio.sock"
-{% endif %}
- node-ip: {{ ansible_default_ipv4.address }}
+ node-ip: {{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}
read-only-port: "10255"
ignorePreflightErrors:
- SystemVerification
+{% if (kubernetes_master|bool and not kubernetes_master_taint|bool) %}
+ - NumCPU
+{% endif %}
{% if true == false %}
- IsPrivilegedUser
{% endif %}
localAPIEndpoint:
- advertiseAddress: "{{ ansible_default_ipv4.address }}"
+ advertiseAddress: "{{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}"
bindPort: 6443
-{% if kubernetes_master|bool and groups['KubernetesMasterConfigured'] is defined %}
+{% if kubernetes_master|bool and groups['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is defined %}
certificateKey: "{{ kubernetes_certificateKey.stdout }}"
{% endif %}
+{% if kubernetes_master|bool and groups['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is not defined %}
---
-apiVersion: kubeadm.k8s.io/v1beta2
-kind: JoinConfiguration
-{% if kubernetes_master|bool %}
-controlPlane:
- localAPIEndpoint:
- advertiseAddress: "{{ ansible_default_ipv4.address }}"
- bindPort: 6443
-{% if groups['KubernetesMasterConfigured'] is defined %}
- certificateKey: "{{ kubernetes_certificateKey.stdout }}"
-{% endif %}
-{% endif %}
-discovery:
- bootstrapToken:
- apiServerEndpoint: "{{ lb_kubemaster }}:6443"
-{% if groups['KubernetesMasterConfigured'] is defined %}
- caCertHashes:
- - sha256:{{ cacerthash.stdout }}
- token: "{{ kubetoken.stdout }}"
-{% endif %}
-nodeRegistration:
- kubeletExtraArgs:
- node-ip: {{ ansible_default_ipv4.address }}
- read-only-port: "10255"
- ignorePreflightErrors:
- - SystemVerification
----
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: stable
{% if lbip_kubeapiserver is defined %}
controlPlaneEndpoint: "{{ lbip_kubeapiserver }}:6443"
{% else %}
-controlPlaneEndpoint: "{{ ansible_default_ipv4.address }}:6443"
+controlPlaneEndpoint: "{{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}:6443"
{% endif %}
apiServer:
extraArgs:
- enable-admission-plugins: NodeRestriction,PodSecurityPolicy
+ enable-admission-plugins: NodeRestriction,AlwaysPullImages,EventRateLimit
authorization-mode: "Node,RBAC"
audit-policy-file: "/etc/kubernetes/policies/audit-policy.yaml"
audit-log-path: "/var/log/apiserver/audit.log"
audit-log-maxage: "30"
audit-log-maxbackup: "10"
audit-log-maxsize: "100"
+{% if false %}
+# Falco
+ audit-webhook-config-file: "/etc/kubernetes/policies/audit-webhook-kubeconfig"
+ audit-webhook-batch-max-wait: "5s"
+{% endif %}
extraVolumes:
- name: "audit-log"
hostPath: "/var/log/apiserver"
@@ -96,35 +74,163 @@ apiServer:
readOnly: false
pathType: DirectoryOrCreate
- name: "audit-policies"
- hostPath: "/etc/kubernetes/policies"
- mountPath: "/etc/kubernetes/policies"
+ hostPath: "/etc/kubernetes/policies/audit-policy.yaml"
+ mountPath: "/etc/kubernetes/policies/audit-policy.yaml"
readOnly: false
- pathType: DirectoryOrCreate
+ pathType: File
{% if lb_kubemaster is defined %}
certSANs:
- "{{ lb_kubemaster }}"
{% endif %}
{% if kubernetes_network == "flannel" or kubernetes_network == "calico" %}
networking:
-{% if kubernetes_network == "flannel" %}
- podSubnet: "10.244.0.0/16"
-{% elif kubernetes_network == "calico" %}
- podSubnet: "192.168.0.0/16"
+ podSubnet: "{{ kubernetes_pods_network }}"
+ serviceSubnet: "{{ kubernetes_svc_network }}"
+{% endif %}
+controllerManager:
+ extraArgs:
+ bind-address: 0.0.0.0
+scheduler:
+ extraArgs:
+ bind-address: 0.0.0.0
+etcd:
+ local:
+ dataDir: /var/lib/etcd
+ extraArgs:
+ listen-metrics-urls: http://0.0.0.0:2381
+{% endif %}
+{% if not kubernetes_master|bool or kubernetes_master|bool and groups['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is defined %}
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: JoinConfiguration
+{% if kubernetes_master|bool %}
+controlPlane:
+ localAPIEndpoint:
+ advertiseAddress: "{{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}"
+ bindPort: 6443
+{% if groups['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is defined %}
+ certificateKey: "{{ kubernetes_certificateKey.stdout }}"
+{% endif %}
+{% endif %}
+discovery:
+ bootstrapToken:
+ apiServerEndpoint: "{{ lb_kubemaster }}:6443"
+{% if groups['KubernetesMasterConfigured_' ~ kubernetes_cluster_name] is defined %}
+ caCertHashes:
+ - sha256:{{ cacerthash.stdout }}
+ token: "{{ kubetoken.stdout }}"
+{% endif %}
+nodeRegistration:
+ kubeletExtraArgs:
+ node-ip: {{ lookup('vars', 'ansible_' + kubernetes_interface ).ipv4.address }}
+# read-only-port: "10255"
+ ignorePreflightErrors:
+ - SystemVerification
+{% if kubernetes_swap_enabled is defined and kubernetes_swap_enabled|bool %}
+ - Swap
+{% endif %}
+{% if (kubernetes_master|bool and not kubernetes_master_taint|bool) %}
+ - NumCPU
{% endif %}
{% endif %}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
+metricsBindAddress: "0.0.0.0:10249"
{% if kubernetes_kubeproxy_mode is defined %}
mode: {{ kubernetes_kubeproxy_mode }}
+{% if kubernetes_kubeproxy_mode == "ipvs" %}
+ipvs:
+ strictARP: true
+{% endif %}
{% endif %}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
-runtimeRequestTimeout: 5m
+#authentication:
+# anonymous:
+# enabled: false
+# webhook:
+# cacheTTL: 2m0s
+# enabled: true
+# x509:
+# clientCAFile: /etc/kubernetes/pki/ca.crt
+#authorization:
+# mode: Webhook
+# webhook:
+# cacheAuthorizedTTL: 5m0s
+# cacheUnauthorizedTTL: 30s
{% if ansible_service_mgr == "systemd" %}
cgroupDriver: systemd
{% endif %}
+#cgroupsPerQOS: true
+#clusterDNS:
+#- 10.96.0.10
+#clusterDomain: cluster.local
+#configMapAndSecretChangeDetectionStrategy: Watch
+#containerLogMaxFiles: 5
+#containerLogMaxSize: 10Mi
+#contentType: application/vnd.kubernetes.protobuf
+#cpuCFSQuota: true
+#cpuCFSQuotaPeriod: 100ms
+#cpuManagerPolicy: none
+#cpuManagerReconcilePeriod: 10s
+#enableControllerAttachDetach: true
+#enableDebuggingHandlers: true
+#enforceNodeAllocatable:
+#- pods
+#eventBurst: 10
+#eventRecordQPS: 5
+#evictionHard:
+# imagefs.available: 15%
+# memory.available: 500Mi
+# nodefs.available: 10%
+# nodefs.inodesFree: 5%
+#evictionPressureTransitionPeriod: 5m0s
+{% if kubernetes_swap_enabled is defined and kubernetes_swap_enabled|bool %}
+# Activation du swap
+failSwapOn: false
+featureGates:
+ NodeSwap: true
+memorySwap:
+ swapBehavior: UnlimitedSwap
+{% endif %}
+#fileCheckFrequency: 20s
+#hairpinMode: promiscuous-bridge
+#healthzBindAddress: 127.0.0.1
+#healthzPort: 10248
+#httpCheckFrequency: 20s
+#imageGCHighThresholdPercent: 85
+#imageGCLowThresholdPercent: 80
+#imageMinimumGCAge: 2m0s
+#iptablesDropBit: 15
+#iptablesMasqueradeBit: 14
+#kubeAPIBurst: 10
+#kubeAPIQPS: 5
+#logging: {}
+#makeIPTablesUtilChains: true
+#maxOpenFiles: 1000000
+#maxPods: 110
+#memorySwap: {}
+#nodeLeaseDurationSeconds: 40
+#nodeStatusReportFrequency: 1m0s
+#nodeStatusUpdateFrequency: 10s
+#oomScoreAdj: -999
+#podPidsLimit: -1
+#port: 10250
+#registryBurst: 10
+#registryPullQPS: 5
+#resolvConf: /etc/resolv.conf
+#rotateCertificates: true
+runtimeRequestTimeout: 5m
+#serializeImagePulls: true
+#shutdownGracePeriod: 0s
+#shutdownGracePeriodCriticalPods: 0s
+#staticPodPath: /etc/kubernetes/manifests
+#streamingConnectionIdleTimeout: 4h0m0s
+#syncFrequency: 1m0s
+#topologyManagerPolicy: none
+#volumeStatsAggPeriod: 1m0s
{% if false %}
readOnlyPort: 1
diff --git a/templates/var/lib/rancher/k3s/server/manifests/np-00-intra-namespace.yaml.j2 b/templates/var/lib/rancher/k3s/server/manifests/np-00-intra-namespace.yaml.j2
new file mode 100644
index 0000000..8775180
--- /dev/null
+++ b/templates/var/lib/rancher/k3s/server/manifests/np-00-intra-namespace.yaml.j2
@@ -0,0 +1,12 @@
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: intra-namespace
+ namespace: kube-system
+spec:
+ podSelector: {}
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ name: kube-system
diff --git a/templates/var/lib/rancher/k3s/server/manifests/np-01-default-network-dns-policy.yaml.j2 b/templates/var/lib/rancher/k3s/server/manifests/np-01-default-network-dns-policy.yaml.j2
new file mode 100644
index 0000000..9357b4f
--- /dev/null
+++ b/templates/var/lib/rancher/k3s/server/manifests/np-01-default-network-dns-policy.yaml.j2
@@ -0,0 +1,17 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: default-network-dns-policy
+ namespace: kube-system
+spec:
+ ingress:
+ - ports:
+ - port: 53
+ protocol: TCP
+ - port: 53
+ protocol: UDP
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ policyTypes:
+ - Ingress
diff --git a/templates/var/lib/rancher/k3s/server/manifests/np-03-metrics-server-traefik.yaml.j2 b/templates/var/lib/rancher/k3s/server/manifests/np-03-metrics-server-traefik.yaml.j2
new file mode 100644
index 0000000..e7b8621
--- /dev/null
+++ b/templates/var/lib/rancher/k3s/server/manifests/np-03-metrics-server-traefik.yaml.j2
@@ -0,0 +1,42 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: allow-all-metrics-server
+ namespace: kube-system
+spec:
+ podSelector:
+ matchLabels:
+ k8s-app: metrics-server
+ ingress:
+ - {}
+ policyTypes:
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: allow-all-svclbtraefik-ingress
+ namespace: kube-system
+spec:
+ podSelector:
+ matchLabels:
+ svccontroller.k3s.cattle.io/svcname: traefik
+ ingress:
+ - {}
+ policyTypes:
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: allow-all-traefik-v121-ingress
+ namespace: kube-system
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: traefik
+ ingress:
+ - {}
+ policyTypes:
+ - Ingress
+---
diff --git a/vars/RedHat.yml b/vars/RedHat.yml
index a5905f3..4eecd17 100644
--- a/vars/RedHat.yml
+++ b/vars/RedHat.yml
@@ -1,8 +1,9 @@
---
kubernetes_package_name:
- - kubectl
- - kubelet
- - kubeadm
+ - kubectl-{{ kubernetes_version }}
+ - kubelet-{{ kubernetes_version }}
+ - kubeadm-{{ kubernetes_version }}
- iproute-tc
+ - ipvsadm
#kubernetes_remove_packages_name:
# - kubernetes.io
diff --git a/vars/masters.yml b/vars/masters.yml
new file mode 100644
index 0000000..4a03a33
--- /dev/null
+++ b/vars/masters.yml
@@ -0,0 +1,3 @@
+---
+lv_containers_size: 2g
+lv_kubernetes_size: 8g