---
- name: Include vars when Kubernetes masters are not tainted
  include_vars: masters.yml
  when:
    - kubernetes_master|bool
    - not kubernetes_master_taint|bool

- name: Add master to KubernetesMasters_ClusterName group
  group_by:
    key: KubernetesMasters_{{ kubernetes_cluster_name }}
  when:
    - "'KubernetesMasters' in group_names"

- name: Add node to KubernetesNodes_ClusterName group
  group_by:
    key: KubernetesNodes_{{ kubernetes_cluster_name }}
  when:
    - "'KubernetesNodes' in group_names"

- name: Disable swap since Kubernetes can't work with swap enabled (1/2)
  command: swapoff -a
  changed_when: false

- name: Remove swapfile from /etc/fstab (2/2)
  mount:
    name: swap
    fstype: swap
    state: absent

- name: Create a thin pool for Kubernetes
  lvol:
    vg: vg_sys
    thinpool: kubernetes
    size: "{{ lv_kubernetes_size | default('20g') }}"

## Install API load balancer
#- include_tasks: "load_balancer.yml"
#  when:
#    - kubernetes_master|bool
#    - groups['KubernetesMasters'] | length > 1

- name: Kubernetes cluster with kubeadm
  include_tasks: "cluster_kubeadm.yml"
  when:
    - kubernetes_cri != "k3s"

- name: Kubernetes cluster with k3s
  include_tasks: "cluster_k3s.yml"
  when:
    - kubernetes_cri == "k3s"

#
# At this point Kubernetes is up and running; now set up kubeconfig access to it.
#
- name: Make /root/.kube directory
  file:
    path: "/root/.kube"
    owner: root
    group: root
    mode: 0700
    state: directory
  when:
    - kubernetes_master|bool

- name: Copy kubeconfig file from /etc/kubernetes/admin.conf
  copy:
    src: "/etc/kubernetes/admin.conf"
    dest: /root/.kube/config
    remote_src: yes
    owner: root
    group: root
    mode: 0600
  when:
    - kubernetes_master|bool
    - kubernetes_cri != "k3s"

- name: Copy kubeconfig file from /etc/rancher/k3s/k3s.yaml
  copy:
    src: "/etc/rancher/k3s/k3s.yaml"
    dest: /root/.kube/config
    remote_src: yes
    owner: root
    group: root
    mode: 0600
  when:
    - kubernetes_master|bool
    - kubernetes_cri == "k3s"

#
# Missing: auto-configuration of the local .kube/config (see the commented sketch at the end of this file)
#
#- name: Fetch the CA certificate
#  copy:
#    src: /etc/kubernetes/pki/ca.crt
#    dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
#  when:
#    - kubernetes_master|bool

- name: Check if the node is still tainted
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
  register: current_taint
  changed_when: false
  check_mode: no
  when:
    - kubernetes_master_taint|bool

- name: Remove the master taint from the node if needed
#  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/master-
  when:
    - kubernetes_master_taint|bool
    - current_taint.stdout
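
#
# A minimal, commented-out sketch of the missing local .kube/config
# auto-configuration noted above. This is an assumption, not part of the role:
# the controller-side destination path under ~/.kube/{{ kubernetes_cluster_name }}
# and the kubeadm-only condition are illustrative; review and adjust before enabling.
#
#- name: Fetch the admin kubeconfig to the Ansible controller
#  fetch:
#    src: /etc/kubernetes/admin.conf
#    dest: "~/.kube/{{ kubernetes_cluster_name }}/config"
#    flat: yes
#  when:
#    - kubernetes_master|bool
#    - kubernetes_cri != "k3s"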