# ansible-role-kubernetes/tasks/install_server.yml
---
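# The kubelet refuses to start while swap is active (unless failSwapOn is
# explicitly disabled), so turn swap off now and keep it off across reboots.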
- name: Disable SWAP since kubernetes can't work with swap enabled (1/2)
  command: swapoff -a
  changed_when: false
- name: Remove swapfile from /etc/fstab (2/2)
  mount:
    name: swap
    fstype: swap
    state: absent
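# LVM thin pool on the vg_sys volume group; presumably used as the storage
# backend for the container runtime (size and VG name are role conventions).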
- name: Create a thin pool for kubernetes
  lvol:
    vg: vg_sys
    thinpool: kubernetes
    size: 20g
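# With more than one master, nodes and kubeconfigs need a single stable API
# endpoint, so a load balancer is set up in front of the API servers.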
# Install API load balancer
- include_tasks: "load_balancer.yml"
  when:
    - kubernetes_master|bool
    - groups['KubernetesMasters'] | length > 1
- name: Create audit policies directory
  file:
    path: "/etc/kubernetes/policies"
    state: directory
    owner: root
    group: root
    mode: 0700
  when:
    - kubernetes_master|bool
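# The audit policy copied below is presumably wired into the API server
# (e.g. via --audit-policy-file) by cluster_kubeadm.yml.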
- name: Configure audit policy
  copy:
    src: "etc/kubernetes/policies/audit-policy.yaml"
    dest: "/etc/kubernetes/policies/audit-policy.yaml"
    group: root
    owner: root
    mode: 0644
  when:
    - kubernetes_master|bool
- name: Kubernetes cluster with kubeadm
  include_tasks: "cluster_kubeadm.yml"
  when:
    - kubernetes_cri != "k3s"
- name: Kubernetes cluster with k3s
  include_tasks: "cluster_k3s.yml"
  when:
    - kubernetes_cri == "k3s"
#
# At this point, we have a Kubernetes cluster up and running, but it is not fully configured yet
#
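# Give root a working kubeconfig so the kubectl commands below (and interactive
# debugging on the masters) run without an explicit --kubeconfig flag.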
- name: Make /root/.kube directory
  file:
    path: "/root/.kube"
    owner: root
    group: root
    mode: 0700
    state: directory
  when:
    - kubernetes_master|bool
- name: Copy kubeconfig file from /etc/kubernetes/admin.conf
  copy:
    src: "/etc/kubernetes/admin.conf"
    dest: /root/.kube/config
    remote_src: yes
    owner: root
    group: root
    mode: 0600
  when:
    - kubernetes_master|bool
    - kubernetes_cri != "k3s"
- name: Copy kubeconfig file from /etc/rancher/k3s/k3s.yaml
  copy:
    src: "/etc/rancher/k3s/k3s.yaml"
    dest: /root/.kube/config
    remote_src: yes
    owner: root
    group: root
    mode: 0600
  when:
    - kubernetes_master|bool
    - kubernetes_cri == "k3s"
#
# TODO: auto-configuration of the local .kube/config is still missing
#
#- name: Fetch CA certificate
#  copy:
#    src: /etc/kubernetes/pki/ca.crt
#    dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
#  when:
#    - kubernetes_master|bool
- name: Check if the node is still tainted
  command: kubectl get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
  changed_when: false
  when: kubernetes_master_taint|bool
  register: current_taint
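# The trailing "-" on node-role.kubernetes.io/master- removes the taint instead of adding it.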
- name: Remove the master taint from the node if needed
  # command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
  command: kubectl taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/master-
  when: kubernetes_master_taint|bool and current_taint.stdout