Update role
Some checks failed
continuous-integration/drone/push Build is failing

This commit is contained in:
Adrien Reslinger 2022-10-27 23:38:04 +02:00
parent 48e99ac551
commit d4dac488f9
Signed by: adrien
GPG key ID: DA7B27055C66D6DE
7 changed files with 298 additions and 279 deletions

View file

@ -1,51 +1,53 @@
---
- name: Include vars for not taint Kubernetes masters
include_vars: masters.yml
ansible.builtin.include_vars: masters.yml
when:
- kubernetes_master|bool
- not kubernetes_master_taint|bool
- name: Add master to KubernetesMasters_ClusterName group
group_by:
ansible.builtin.group_by:
key: KubernetesMasters_{{ kubernetes_cluster_name }}
check_mode: false
when:
- "'KubernetesMasters' in group_names"
- name: Add node to KubernetesNodes_ClusterName group
group_by:
ansible.builtin.group_by:
key: KubernetesNodes_{{ kubernetes_cluster_name }}
check_mode: false
when:
- "'KubernetesNodes' in group_names"
- name: Disable SWAP since kubernetes can't work with swap enabled (1/2)
command: swapoff -a
ansible.builtin.command: swapoff -a
changed_when: false
- name: Remove swapfile from /etc/fstab (2/2)
mount:
ansible.posix.mount:
name: swap
fstype: swap
state: absent
- name: Create a thin pool for kubernetes
lvol:
community.general.lvol:
vg: vg_sys
thinpool: kubernetes
size: "{{ lv_kubernetes_size | default('20g') }}"
## Install API loadbalancer
#- include_tasks: "load_balancer.yml"
# when:
# - kubernetes_master|bool
# - groups['KubernetesMasters'] | length > 1
# - ansible.builtin.include_tasks: "load_balancer.yml"
# when:
# - kubernetes_master|bool
# - groups['KubernetesMasters'] | length > 1
- name: Kubernetes cluster with kubeadm
include_tasks: "cluster_kubeadm.yml"
ansible.builtin.include_tasks: "cluster_kubeadm.yml"
when:
- kubernetes_cri != "k3s"
- name: Kubernetes cluster with k3s
include_tasks: "cluster_k3s.yml"
ansible.builtin.include_tasks: "cluster_k3s.yml"
when:
- kubernetes_cri == "k3s"
@ -54,7 +56,7 @@
#
- name: Make /root/.kube directory
file:
ansible.builtin.file:
path: "/root/.kube"
owner: root
group: root
@ -64,10 +66,10 @@
- kubernetes_master|bool
- name: Copy kubeconfig file from /etc/kubernetes/admin.conf
copy:
ansible.builtin.copy:
src: "/etc/kubernetes/admin.conf"
dest: /root/.kube/config
remote_src: yes
remote_src: true
owner: root
group: root
mode: 0600
@ -76,10 +78,10 @@
- kubernetes_cri != "k3s"
- name: Copy kubeconfig file from /etc/rancher/k3s/k3s.yaml
copy:
ansible.builtin.copy:
src: "/etc/rancher/k3s/k3s.yaml"
dest: /root/.kube/config
remote_src: yes
remote_src: true
owner: root
group: root
mode: 0600
@ -91,24 +93,23 @@
# TODO: automatic setup of the local .kube/config is still missing
#
#- name: Fetching CA certificat
# copy:
# src: /etc/kubernetes/pki/ca.crt
# dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
# when:
# - kubernetes_master|bigip_pool
# - name: Fetching CA certificate
# ansible.builtin.copy:
# src: /etc/kubernetes/pki/ca.crt
# dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
# when:
#     - kubernetes_master|bool
- name: Check if a node is still tainted
command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
register: current_taint
check_mode: no
check_mode: false
when:
- kubernetes_master_taint|bool
- name: taint the machine if needed
# command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/master-
- name: Taint the machine if needed
# ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
ansible.builtin.command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/master-
when:
- kubernetes_master_taint|bool
- current_taint.stdout