Cleaning, add IPVS and fix some little bugs

This commit is contained in:
Adrien Reslinger 2020-05-10 21:01:26 +02:00
parent 103271d12c
commit 56bb742890
5 changed files with 85 additions and 24 deletions


@@ -3,5 +3,6 @@
 #kubernetes_cri: "containerd"
 kubernetes_server: false
 # value for kuberntes_network: calico, weave-net
-#kubernetes_network: calico
+#kubernetes_network: weave-net
+kubernetes_kubeproxy_mode: ipvs
 kubernetes_version: 1.18.2
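
The new kubernetes_kubeproxy_mode default switches kube-proxy from its usual iptables mode to IPVS. A minimal sketch of overriding these defaults per cluster; the group_vars path and values are illustrative, not part of this commit:

# group_vars/kube_cluster.yml (hypothetical inventory group)
kubernetes_server: true
kubernetes_network: weave-net        # or calico / flannel
kubernetes_kubeproxy_mode: ipvs      # omit to keep kube-proxy's iptables default
kubernetes_version: 1.18.2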


@@ -24,30 +24,54 @@
   shell: |
     swapoff -a
 
-- name: Remove swapfile from /etc/fstab
+- name: Remove swapfile from /etc/fstab (2/2)
   mount:
     name: swap
     fstype: swap
     state: absent
 
-- name: Ensuring /etc/systemd/system/kubelet.service.d Folder Exists
-  file:
-    path: "/etc/systemd/system/kubelet.service.d"
-    state: "directory"
-    group: root
-    owner: root
-    mode: 0755
-
-- name: Configure kubelet service
+- name: Configuring IPVS kernel module to be load on boot
   template:
-    src: "etc/{{ item }}.j2"
-    dest: "/etc/{{ item }}"
+    src: "etc/modules-load.d/ipvs.conf.j2"
+    dest: "/etc/modules-load.d/ipvs.conf"
     group: root
     owner: root
     mode: 0644
+  when:
+    - kubernetes_kubeproxy_mode == "ipvs"
+
+- name: Load IPVS kernel module
+  modprobe:
+    name: "{{ item }}"
+    state: present
   with_items:
-    - "systemd/system/kubelet.service.d/0-containerd.conf"
-    - "sysconfig/kubelet"
+    - ip_vs
+    - ip_vs_rr
+    - ip_vs_wrr
+    - ip_vs_sh
+    - nf_conntrack_ipv4
+    - nf_conntrack_ipv6
+  when:
+    - kubernetes_kubeproxy_mode == "ipvs"
+
+#- name: Ensuring /etc/systemd/system/kubelet.service.d Folder Exists
+#  file:
+#    path: "/etc/systemd/system/kubelet.service.d"
+#    state: "directory"
+#    group: root
+#    owner: root
+#    mode: 0755
+#
+#- name: Configure kubelet service
+#  template:
+#    src: "etc/{{ item }}.j2"
+#    dest: "/etc/{{ item }}"
+#    group: root
+#    owner: root
+#    mode: 0644
+#  with_items:
+#    - "systemd/system/kubelet.service.d/0-containerd.conf"
+#    - "sysconfig/kubelet"
 
 - name: Enable kubelet on boot
   service:
@@ -179,12 +203,13 @@
     - server_enrolled.rc == 1
 
 - name: Check if a node is still tainted
-  command: kubectl get nodes '{{ ansible_host }}' -o jsonpath='{.spec.taints}'
+  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes '{{ ansible_host | lower }}' -o jsonpath='{.spec.taints}'
   when: kubernetes_master_taint
   register: current_taint
 
 - name: taint the machine if needed
-  command: kubectl taint nodes --all node-role.kubernetes.io/master-
+# command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
+  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes '{{ ansible_host | lower }}' node-role.kubernetes.io/master-
   when: kubernetes_master_taint == true and current_taint.stdout
 
 #
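
The modules-load.d template makes the IPVS modules persistent across reboots, while the modprobe loop loads them immediately; both tasks are gated on kubernetes_kubeproxy_mode == "ipvs". A follow-up check could confirm the modules are actually present. This is a sketch only, not part of this commit, and the task name and grep pattern are illustrative:

- name: Verify IPVS kernel modules are loaded
  shell: lsmod | grep -E '^(ip_vs|nf_conntrack)'
  register: ipvs_modules
  changed_when: false
  when:
    - kubernetes_kubeproxy_mode == "ipvs"

The second hunk also fixes the kubectl calls: they now pass --kubeconfig=/etc/kubernetes/admin.conf explicitly, lowercase the node name (Kubernetes registers nodes under their lowercase hostname), and remove the master taint only from the current node instead of using --all.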


@@ -23,4 +23,16 @@
 {% else %}
   <port protocol="tcp" port="10250"/>
 {% endif %}
+{% if kubernetes_network == "flannel" %}
+  # flannel vxlan
+  <port protocol="udp" port="8472"/>
+{% elif kubernetes_network == "calico" %}
+  # calico
+  <port protocol="udp" port="4789"/>
+  <port protocol="tcp" port="5473"/>
+{% elif kubernetes_network == "weave-net" %}
+  # Weave-Net
+  <port protocol="udp" port="6783-6784"/>
+  <port protocol="tcp" port="6783"/>
+{% endif %}
 </service>
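
The extra ports match the overlay networks the role supports: flannel's VXLAN backend uses UDP 8472, Calico uses UDP 4789 for VXLAN plus TCP 5473 for Typha, and Weave Net uses TCP 6783 and UDP 6783-6784 for its control and data planes. Enabling the rendered service could look like the task below; the firewalld service name "kubernetes" is an assumption, not something defined in this commit:

- name: Enable the kubernetes firewalld service
  firewalld:
    service: kubernetes     # assumed name of the rendered service file
    permanent: true
    immediate: true
    state: enabled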


@@ -0,0 +1,6 @@
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+nf_conntrack_ipv4
+nf_conntrack_ipv6
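
One caveat with this module list, not addressed by the commit: on kernels 4.19 and newer, nf_conntrack_ipv4 and nf_conntrack_ipv6 were merged into nf_conntrack, so loading them under the old names can fail there. A hedged sketch of a version-aware list (the variable name is illustrative and not used by the role):

ipvs_conntrack_modules: "{{ ['nf_conntrack'] if ansible_kernel.split('-')[0] is version('4.19', '>=') else ['nf_conntrack_ipv4', 'nf_conntrack_ipv6'] }}"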


@@ -9,6 +9,8 @@ bootstrapTokens:
 nodeRegistration:
 {% if kubernetes_cri == "containerd" %}
   criSocket: "/run/containerd/containerd.sock"
+{% elif kubernetes_cri == "cri-o" %}
+  criSocket: "/var/run/crio/crio.sock"
 {% elif kubernetes_cri == "docker" %}
   criSocket: "/var/run/docker.sock"
 {% endif %}
@@ -20,13 +22,15 @@ nodeRegistration:
     effect: "NoSchedule"
 {% endif %}
   kubeletExtraArgs:
-{% if kubernetes_cri == "containerd" %}
     cgroup-driver: "systemd"
     container-runtime: "remote"
-    runtime-request-timeout: "15m"
+    runtime-request-timeout: "5m"
+{% if kubernetes_cri == "containerd" %}
     container-runtime-endpoint: "unix:///run/containerd/containerd.sock"
+{% elif kubernetes_cri == "cri-o" %}
+    container-runtime-endpoint: "unix:///var/run/crio/crio.sock"
 {% endif %}
-    node-ip: {{ ansible_host }}
+    node-ip: {{ ansible_default_ipv4.address }}
     read-only-port: "10255"
   ignorePreflightErrors:
     - SystemVerification
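
With the shared kubeletExtraArgs hoisted out of the containerd branch, only the runtime endpoint now differs per CRI. For a node using cri-o, the template would render roughly as below; the node-ip value is just an example address standing in for ansible_default_ipv4.address:

  kubeletExtraArgs:
    cgroup-driver: "systemd"
    container-runtime: "remote"
    runtime-request-timeout: "5m"
    container-runtime-endpoint: "unix:///var/run/crio/crio.sock"
    node-ip: 192.0.2.10
    read-only-port: "10255"
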
@@ -34,7 +38,7 @@ nodeRegistration:
     - IsPrivilegedUser
 {% endif %}
 localAPIEndpoint:
-  advertiseAddress: "{{ ansible_host }}"
+  advertiseAddress: "{{ ansible_default_ipv4.address }}"
   bindPort: 6443
 {% if kubernetes_certificateKey is defined %}
 certificateKey: "{{ kubernetes_certificateKey.stdout }}"
@@ -56,7 +60,7 @@ discovery:
     token: "{{ kubetoken.stdout }}"
 nodeRegistration:
   kubeletExtraArgs:
-    node-ip: {{ ansible_host }}
+    node-ip: {{ ansible_default_ipv4.address }}
     read-only-port: "10255"
   ignorePreflightErrors:
     - SystemVerification
@@ -75,7 +79,20 @@ apiServer:
   certSANs:
   - "{{ lb_kubemaster }}"
 {% endif %}
-{% if kubernetes_network == "flannel" %}
+{% if kubernetes_network == "flannel" or kubernetes_network == "calico" %}
 networking:
+{% if kubernetes_network == "flannel" %}
   podSubnet: "10.244.0.0/16"
-{% endif %}
+{% elif kubernetes_network == "calico" %}
+  podSubnet: "192.168.0.0/16"
+{% endif %}
+{% endif %}
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+{% if kubernetes_kubeproxy_mode is defined %}
+mode: {{ kubernetes_kubeproxy_mode }}
+{% endif %}
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
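
kubeadm accepts several YAML documents in one config file, so the new KubeProxyConfiguration and KubeletConfiguration sections are picked up at kubeadm init time alongside the other kubeadm documents in this template. With kubernetes_kubeproxy_mode set to ipvs, the rendered tail of the file would be roughly:

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration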