cleaning, add IPVS and fix some little bugs

Adrien Reslinger 2020-05-10 21:01:26 +02:00
parent 103271d12c
commit 56bb742890
5 changed files with 85 additions and 24 deletions


@@ -24,30 +24,54 @@
  shell: |
    swapoff -a
- name: Remove swapfile from /etc/fstab
- name: Remove swapfile from /etc/fstab (2/2)
  mount:
    name: swap
    fstype: swap
    state: absent
- name: Ensuring /etc/systemd/system/kubelet.service.d Folder Exists
  file:
    path: "/etc/systemd/system/kubelet.service.d"
    state: "directory"
    group: root
    owner: root
    mode: 0755
- name: Configure kubelet service
- name: Configuring IPVS kernel modules to be loaded on boot
  template:
    src: "etc/{{ item }}.j2"
    dest: "/etc/{{ item }}"
    src: "etc/modules-load.d/ipvs.conf.j2"
    dest: "/etc/modules-load.d/ipvs.conf"
    group: root
    owner: root
    mode: 0644
  when:
    - kubernetes_kubeproxy_mode == "ipvs"
- name: Load IPVS kernel modules
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - "systemd/system/kubelet.service.d/0-containerd.conf"
    - "sysconfig/kubelet"
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
    - nf_conntrack_ipv4
    - nf_conntrack_ipv6
  when:
    - kubernetes_kubeproxy_mode == "ipvs"
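Loading the ip_vs modules only matters if kube-proxy itself is switched to IPVS mode. How this role feeds kubernetes_kubeproxy_mode into kube-proxy is not visible in this hunk; as an editor's sketch (an assumption, not part of this commit), the usual kubeadm approach is to append a KubeProxyConfiguration document to the kubeadm config:

# Editor's illustration only, not part of this commit: kubeadm config excerpt
# that switches kube-proxy to IPVS; the role presumably templates something
# similar wherever it builds its kubeadm configuration.
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  scheduler: "rr"   # round-robin, matching the ip_vs_rr module loaded above

On an already-running cluster the same setting lives in the kube-proxy ConfigMap in the kube-system namespace.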
#- name: Ensuring /etc/systemd/system/kubelet.service.d Folder Exists
#  file:
#    path: "/etc/systemd/system/kubelet.service.d"
#    state: "directory"
#    group: root
#    owner: root
#    mode: 0755
#
#- name: Configure kubelet service
#  template:
#    src: "etc/{{ item }}.j2"
#    dest: "/etc/{{ item }}"
#    group: root
#    owner: root
#    mode: 0644
#  with_items:
#    - "systemd/system/kubelet.service.d/0-containerd.conf"
#    - "sysconfig/kubelet"
- name: Enable kubelet on boot
  service:
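The hunk is cut off right after service:; a typical completion of this task (an editor's sketch assuming the standard kubelet unit name, not taken from this diff) would be:

- name: Enable kubelet on boot
  service:
    name: kubelet
    enabled: true
    state: started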
@@ -179,12 +203,13 @@
    - server_enrolled.rc == 1
- name: Check if a node is still tainted
  command: kubectl get nodes '{{ ansible_host }}' -o jsonpath='{.spec.taints}'
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_host | lower }}' -o jsonpath='{.spec.taints}'
  when: kubernetes_master_taint
  register: current_taint
- name: Remove the master taint if needed
  command: kubectl taint nodes --all node-role.kubernetes.io/master-
#  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_host | lower }}' node-role.kubernetes.io/master-
  when: kubernetes_master_taint == true and current_taint.stdout
#
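The trailing '-' in node-role.kubernetes.io/master- tells kubectl to remove that taint rather than add it, so the task above untaints the node. A follow-up check (an editor's sketch reusing the same jsonpath query shown above, not part of this commit) could assert that the removal actually took effect:

# Illustration only: fail the play if the master taint is still present
# after the removal task has run.
- name: Verify the master taint is gone
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_host | lower }}' -o jsonpath='{.spec.taints}'
  register: taint_after
  changed_when: false
  failed_when: "'node-role.kubernetes.io/master' in taint_after.stdout"
  when: kubernetes_master_taint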