# ansible-role-kubernetes/tasks/install_server.yml
# (commit f160dfa618 — "Ajout de la reconnaissance de systemd", 2020-06-06)
---
# Install the container runtime selected via `kubernetes_cri`.
- name: Install Containerd
  include_role:
    name: containerd
  when: kubernetes_cri == "containerd"
  # register: kubernetes_cri_changed

- name: Install CRI-O
  include_role:
    name: cri-o
  when: kubernetes_cri == "cri-o"
  # register: kubernetes_cri_changed

# - name: Restart kubelet after kubernetes cri installation
#   service:
#     name: kubelet
#     state: restarted
#   when:
#     - kubernetes_cri_changed is changed
# Kubelet refuses to run with swap enabled: turn it off now (1/2) and
# keep it off across reboots by dropping the fstab entry (2/2).
- name: Disable SWAP since kubernetes can't work with swap enabled (1/2)
  command: swapoff -a
  # Idempotency guard: only run (and report "changed") when swap is
  # actually active on the host.
  when: ansible_swaptotal_mb | default(0) | int > 0

- name: Remove swapfile from /etc/fstab (2/2)
  mount:
    name: swap
    fstype: swap
    state: absent
# kube-proxy in IPVS mode needs these kernel modules both immediately
# (modprobe) and after every reboot (modules-load.d).
- name: Configuring IPVS kernel module to be load on boot
  template:
    src: "etc/modules-load.d/ipvs.conf.j2"
    dest: "/etc/modules-load.d/ipvs.conf"
    group: root
    owner: root
    mode: "0644"
  when:
    - kubernetes_kubeproxy_mode == "ipvs"

- name: Load IPVS kernel module
  modprobe:
    name: "{{ item }}"
    state: present
  loop:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
    # NOTE(review): on kernels >= 4.19 nf_conntrack_ipv4/ipv6 were merged
    # into nf_conntrack; these two loads will fail there — confirm the
    # target kernel versions.
    - nf_conntrack_ipv4
    - nf_conntrack_ipv6
  when:
    - kubernetes_kubeproxy_mode == "ipvs"
# Kubelet extra-args configuration; the file layout differs between
# systemd and non-systemd service managers.
- name: Ensuring /etc/systemd/system/kubelet.service.d Folder Exists
  file:
    path: "/etc/systemd/system/kubelet.service.d"
    state: "directory"
    group: root
    owner: root
    mode: "0755"
  when:
    - ansible_service_mgr == "systemd"

# Task names were duplicated ("Configure kubelet service" twice); the
# systemd / non-systemd suffixes disambiguate play output.
- name: Configure kubelet service (systemd)
  template:
    src: "etc/{{ item }}.j2"
    dest: "/etc/{{ item }}"
    group: root
    owner: root
    mode: "0644"
  loop:
    - "systemd/system/kubelet.service.d/0-kubelet-extra-args.conf"
    # - "sysconfig/kubelet"
  when:
    - ansible_service_mgr == "systemd"

- name: Configure kubelet service (non-systemd)
  template:
    src: "etc/{{ item }}.j2"
    dest: "/etc/{{ item }}"
    group: root
    owner: root
    mode: "0644"
  loop:
    - "sysconfig/kubelet"
  when:
    - ansible_service_mgr != "systemd"

- name: Enable kubelet on boot
  service:
    name: kubelet
    state: started
    enabled: true
# Install the API load balancer — only meaningful with more than one master.
- include_tasks: "load_balancer.yml"
  when:
    - kubernetes_master|bool
    - groups['KubernetesMasters'] | length > 1

# /etc/kubernetes/admin.conf only exists on hosts kubeadm has already
# configured; use its presence to build the KubernetesMasterConfigured
# group that gates the bootstrap logic below.
- name: Check if /etc/kubernetes/admin.conf already exists
  stat:
    path: /etc/kubernetes/admin.conf
  register: st
  changed_when: false

- name: Create KubernetesMasterConfigured group
  group_by:
    key: KubernetesMasterConfigured
  when:
    - st.stat.exists
# Parse the installed kubeadm version; later tasks branch on
# major == 1 and minor >= 15 (presumably the minimum for the
# config-file-driven init/join flow used below — TODO confirm).
- name: Retrieve kubeadm Major version
  shell: set -o pipefail && kubeadm version | sed 's/.*{Major:"\([0-9]\)".*/\1/'
  register: kubeadm_version_major
  changed_when: false

- name: Retrieve kubeadm Minor version
  shell: set -o pipefail && kubeadm version | sed -e 's/.* Minor:"\([0-9]*\)".*/\1/'
  register: kubeadm_version_minor
  changed_when: false

# Fall back to the first listed master when no load-balanced endpoint
# host was defined by the caller.
- name: Defined a default lb_kubemaster
  set_fact:
    lb_kubemaster: "{{ groups['KubernetesMasters'][0] }}"
  when:
    - lb_kubemaster is undefined
    # - groups['KubernetesMasters'] | length > 1
  changed_when: false
# etcd stores cluster secrets in cleartext; keep its data dir root-only.
- name: Secure etcd directory
  file:
    path: "/var/lib/etcd"
    state: directory
    owner: root
    group: root
    mode: "0700"
  when:
    - kubernetes_master|bool

# Render the initial kubeadm config on the first master only, and only
# when no master has been configured yet and kubeadm is >= 1.15.
- name: Deploy initial kubeadm config
  template:
    src: kubeadm-config.yaml.j2
    dest: /root/kubeadm-config.yaml
    owner: root
    group: root
    mode: "0600"
  when:
    - groups['KubernetesMasterConfigured'] is not defined
    - groups['KubernetesMasters'][0] == ansible_hostname
    - kubeadm_version_major.stdout | int == 1
    - kubeadm_version_minor.stdout | int >= 15
# Bootstrap the cluster on the first master only, then record the host
# as configured so no other task in this play re-runs init.
- name: Init Kubernetes on {{ groups['KubernetesMasters'][0] }}
  command: kubeadm init --config=/root/kubeadm-config.yaml
  args:
    # Idempotency guard: a successful init produces admin.conf, so a
    # second run on the same host is skipped instead of failing.
    creates: /etc/kubernetes/admin.conf
  when:
    - groups['KubernetesMasterConfigured'] is not defined
    - groups['KubernetesMasters'][0] == ansible_hostname
    - kubeadm_version_major.stdout | int == 1
    - kubeadm_version_minor.stdout | int >= 15

- name: Add {{ ansible_hostname }} to KubernetesMasterConfigured group
  group_by:
    key: KubernetesMasterConfigured
  when:
    - groups['KubernetesMasterConfigured'] is not defined
    - groups['KubernetesMasters'][0] == ansible_hostname
# Ask the API server (via the lb master's kubeconfig) whether this node
# is already enrolled. Tasks below treat server_enrolled.rc == 1 as
# "node not yet joined"; errors are tolerated for exactly that reason.
- name: Test if server node already included
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes {{ ansible_hostname | lower }}
  delegate_to: "{{ lb_kubemaster }}"
  register: server_enrolled
  changed_when: false
  ignore_errors: true
#- name: Deploy kubeadm config
# template:
# src: kubeadm-config.yaml.j2
# dest: /root/kubeadm-config.yaml
# owner: root
# group: root
# mode: 600
# when:
# - not groups['KubernetesMasters'][0] == ansible_hostname
# - kubeadm_version_major.stdout | int == 1
# - kubeadm_version_minor.stdout | int >= 15
# - server_enrolled.rc == 1
# Gather everything a new node needs to join the cluster: the uploaded
# certificate key, a fresh bootstrap token, and the CA public-key hash.
# All three run on the lb master, which holds the cluster PKI.
- name: Retrieve certificate key on {{ lb_kubemaster }}
  shell: set -o pipefail && kubeadm init phase upload-certs --upload-certs | grep -v upload-certs
  register: kubernetes_certificateKey
  delegate_to: "{{ lb_kubemaster }}"
  when:
    - server_enrolled.rc == 1
    - kubernetes_master|bool
    - kubeadm_version_major.stdout | int == 1
    - kubeadm_version_minor.stdout | int >= 15

- name: Retrieve token on "{{ lb_kubemaster }}"
  command: kubeadm token create
  register: kubetoken
  delegate_to: "{{ lb_kubemaster }}"
  when:
    - server_enrolled.rc == 1

- name: Retrieve CA certificate hash
  shell: set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  register: cacerthash
  delegate_to: "{{ lb_kubemaster }}"
  when:
    - server_enrolled.rc == 1
# Render the join configuration (consumes the token/cert-key/hash facts
# gathered above) and join this node to the cluster.
- name: Deploy kubeadm config
  template:
    src: kubeadm-config.yaml.j2
    dest: /root/kubeadm-config.yaml
    owner: root
    group: root
    mode: "0600"
  when:
    - server_enrolled.rc == 1

- name: Join '{{ ansible_hostname }}' to Kubernetes cluster
  command: kubeadm join --config=/root/kubeadm-config.yaml
  when:
    - kubeadm_version_major.stdout | int == 1
    - kubeadm_version_minor.stdout | int >= 15
    - server_enrolled.rc == 1
# When the role allows workloads on masters, strip the control-plane
# NoSchedule taint from this node if it is still present.
- name: Check if a node is still tainted
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_hostname | lower }}' -o jsonpath='{.spec.taints}'
  when: kubernetes_master_taint|bool
  register: current_taint
  # Read-only query: must never report "changed".
  changed_when: false

- name: taint the machine if needed
  # command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_hostname | lower }}' node-role.kubernetes.io/master-
  when: kubernetes_master_taint|bool and current_taint.stdout
#
# At this point Kubernetes is up and running, but root's kubeconfig is
# not yet in place — set it up below.
#
# Give root on each master a working kubeconfig so kubectl works
# without extra flags.
- name: Make /root/.kube directory
  file:
    path: "/root/.kube"
    owner: root
    group: root
    mode: "0700"
    state: directory
  when:
    - kubernetes_master|bool

- name: Copy admin kubeconfig to /root/.kube/config
  copy:
    src: /etc/kubernetes/admin.conf
    dest: /root/.kube/config
    remote_src: true
    owner: root
    group: root
    mode: "0600"
  when:
    - kubernetes_master|bool
#
# TODO: automatic setup of the operator's local .kube/config is still missing.
#
# - name: Fetching CA certificat
#   copy:
#     src: /etc/kubernetes/pki/ca.crt
#     dest: /root/.kube/{{ kubernetes_cluster_name }}/ca.crt
#   when:
#     - kubernetes_master|bool