Update
roles/bootstrap_cluster/.travis.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
    - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
roles/bootstrap_cluster/README.md (new file, 34 lines)
@@ -0,0 +1,34 @@
Bootstrap Cluster
=================

This role does the following:

- Cleans up old kubeadm configs and resets the cluster on all nodes
- Creates etcd for use with multiple master/controller nodes
- Initializes the cluster on the master node
- Distributes the $HOME/.kube/config to all nodes
- Creates and parses out token and hash variables for dynamic kubeadm join commands on cluster nodes
- Joins the other nodes to the cluster
- Installs the flannel CNI

Requirements
------------
- Variables: edit the variables for the etcd template in the vars directory
- The ability to connect to the internet, or a flannel.yaml file available on an air-gapped network
- An account with sudo privileges

Known issues
------------
- Flannel pods stuck in an Error and/or Crash state: this was caused by the API server not being reachable from the flannel pods because kube-proxy was not creating iptables rules. The only workaround found was to disable firewalld, add multiple ALLOW policies in the kubeadm_install role, and remove all cluster configuration.
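
Example Playbook
----------------

A minimal playbook for applying this role (the `masters` and `workers` group names match the `group_names` checks in tasks/main.yml; the host pattern and `become` setting here are illustrative):

    - hosts: masters:workers
      become: yes
      roles:
        - bootstrap_cluster
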
License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
roles/bootstrap_cluster/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
# defaults file for bootstrap_cluster
roles/bootstrap_cluster/files/config (new file, 19 lines)
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1EUXlNakU0TXpBek1Wb1hEVE14TURReU1ERTRNekF6TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHNoClg1RHk5aXdZeFB1UGhaUWs3Y2JqR0NjWTlld2dMSHA2TGVsYnJKT2hjOGw5eHYwUnNxOVhVd3MzVFNHMTFuUjMKdVdHemdQa0Z0TGZjdlNCRFhXQnExZWQ5NnRmamlRbVZIczEvWlhvUmZKczlteGVQcE1FNTAxN1lkNXJneFFucAorQmlxWGJYNkdycUxuV0VLQk5SNGFHU0ZHSmFWR1pDZ0JlYTVPNUFRS1JoZmhhaUdoTVdzQXVuRmM3dTVXRGw1CkdXZkpuZUd1bDVaTnNwOU81VkdhNERFamU3NFc0algwT08rTDVaaVFicHZoNmNUMUJOT1lhd1F1Q01hTEhvMUQKOVhXWmZyYlZ2VE1EMFZJOFZHekdxMSt2cGIxUVlwR2FNalNENkF1dGtaZ2JqcjNpeVJjUHM4ZTV5d0NaRmdFSAp1NWp1VDdwKytQb29Qc2dQU1hrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZGb3VSb2JFcHR4TnJLYU9iZzFOK3E1aGIvbXRNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCRkphemVkUlREcDN1aEJ6UnJ3NW0zNDBlc0VxR3p2eHVWcmF3bldEQWI2MUpISzBrbApMNit5VDlyeVNSR3JOanVkQnZXWlRrdHdBRytuczgvbWtnekRud2lIQ29qYk1odHNKN0ZNRDFTN3kwMDZqK0hQClVEclhud0hkWlhOdWNVR2pJc2VOckJjd3RGVytNMmYwNUIyaTNrazh6R1IvQzdpYndnZ3JNYmdjaGVZeDd4dEYKOW5hZ3VQM294REpZbzI4bGpzOWtDSmJTT1hDeTNBYXk5ZitUdEh3dFN6Uk1SaENkYVZuc2JPVjloaHA2dDBhSgpRRXRvaTBaODg2b24xYkNjSUNmdlZXTkpHREJZanVPTmNrWFQvM2MwenM1SmdMU3ZLL2s1U0RYUWNaOFpPc2V2Ck1tSWNhN2grQVNHM2VtNys0R0JtQ253YzhldWd1TGFHTEJjUQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://192.168.50.117:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJQ3FjSTVHVlZoOXd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TVRBME1qSXhPRE13TXpGYUZ3MHlNakEwTWpJeE9ETXdNelZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTJvV3A3Z2krVzNwYkVCMSsKVmx1MjZIcytuTU9vOWdtQ0JJT3lZcVZKN0JuK3F5eHl4N0R0cFRYWEJ4S1ExNHZkLzhTNE0wZGlMZUFwSW5OSgo0SjU2eFZVTmRHSnZrV1VrbklneHl1ejR0QVl4cHFyS1dwT29pWUtOZXlXY1JablRXTXhEK3BBV0o3TGJtYmJLCmlXdU5udkhlNG9PNVhUT2JaSDZBcXg0S2tzU3VWblJpa2YrSmwzd0NnL3hkMS9pYUI1K2tXQ3VwNTJ5a3F6cVEKb1dnOFZpT0JidzROb29seDZVb0gxN0N2L1QzbVdHWUp4Q0FyWHFFSlVJNWR3ZjJOZkxaUGZQbXREMlc3Q0lIQQptZjdGU2lCMFpsSlo4L2ZOVy9MTE9ITk03ZGEwY3FGNVlYZ01VM2YxK3g4TFNBeVJmK0swSFJaN1l3MG85YXFsCm5sR2tOUUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSYUxrYUd4S2JjVGF5bWptNE5UZnF1WVcvNQpyVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBcktSbmtpdWVZblB0K0NlOHVhMjhmQXY4YS94RnJOZDdhN2ZrCm1pZXNqM0hRWHNlWDdwN0llcmhhQlhheXBpdGFHNzJoSnBSb2s4SWVwUGJiRE1NS2hzLytESkUxcmVWNmRtOHMKdUdpQkZWY2tTWWtoWUg2RjZ0eWtBRnlCZDZPU0lCZDg4RlNmdmZxczhWSi8ySThiWCt2RjlnT0l6dFZoSTVTNApOVXlpcTZtdXBkL2RYZmFibzJKaXRyT0YraWd0bTdXTEg2UUtBSDl0MW1KVW5qUU5ZWWZHRFhRUnVNdEY5QTF2CkhPYzZpM3E0TjNyWEdsa0wzb0psRlhuck9HWkdCMnZXUjFXRWdTS01iNUVtbnpvRjlwOHF1Y2s4SVdiOWZiSEUKaDR3MURtckVMUFk2TlM1a0xnMzZwbTVDMWJlUmVnbnp2Vld0Ky9JcXpqbzROOWtwdUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMm9XcDdnaStXM3BiRUIxK1ZsdTI2SHMrbk1PbzlnbUNCSU95WXFWSjdCbitxeXh5Cng3RHRwVFhYQnhLUTE0dmQvOFM0TTBkaUxlQXBJbk5KNEo1NnhWVU5kR0p2a1dVa25JZ3h5dXo0dEFZeHBxcksKV3BPb2lZS05leVdjUlpuVFdNeEQrcEFXSjdMYm1iYktpV3VObnZIZTRvTzVYVE9iWkg2QXF4NEtrc1N1Vm5SaQprZitKbDN3Q2cveGQxL2lhQjUra1dDdXA1MnlrcXpxUW9XZzhWaU9CYnc0Tm9vbHg2VW9IMTdDdi9UM21XR1lKCnhDQXJYcUVKVUk1ZHdmMk5mTFpQZlBtdEQyVzdDSUhBbWY3RlNpQjBabEpaOC9mTlcvTExPSE5NN2RhMGNxRjUKWVhnTVUzZjEreDhMU0F5UmYrSzBIUlo3WXcwbzlhcWxubEdrTlFJREFRQUJBb0lCQUVRZCtGdVA5QzYxMUY4UQpvb291NnJSTGZyZ0ZNYzBJSjdSYWpTZTgyRzgxcHpJYWwranhtUkt2MXNpSW5BZmt2SjMyNTJoc3J3a1g0QnF5CkQyeHFXbURhNDJmTkszUUtNRUJ2SC81ZW9pUVQ3alJDOXZNSEpqay9MZlVlUXpsYSt3dXFHT0twT1k3RWJUbmMKUmdSU040STlhVG40ejdaaTJPU1pubWc0NUJqZGpQQS9JK2xwSmNWZ0h0cFBTOFRYYnFoZ2VUMTd6SXVUNWJOZgpnam9lemZvOFRodGowdmliNFgwZ09Cc2hDaGlXWXFVSFlWR2I3Tm5MYVhKZUpvOUp4SllMdkZRYXIxU3V5L0lhCndDZ2lDZDNQR0pMZTk2NHJBRnRISVpQRDRKcUhPTmhiVmFOR0g2OFRReGRsZVZYZ0orRkVyY2hiSFB3M3kwQzUKWUNnVWwzMENnWUVBOGVYemdlOENmNzJGd0NWT1lQVlFNU1VOVTQ3d2dPV3l0WXFweHMvSjFvdVZEQjJNNnl1dQpLd2dIcFR2di82YWQ4ajg0YlpPR0doeTFvdGFrWTJiNk8xdXc5bjVFWDBzeWFML21KZWF4NnRlTzRRTS8wUmZzClZHbkdDOWxKRFkvRmlUdCt4THRINGl6K29KNVlNUjd5R2N1cjM4WGlUL2N5T1pSVjNhYVZKNHNDZ1lFQTUwTFoKN0JpSHlCNWlPYXIwaVp2dDRGOWJDNU9sck5GWnJheGZwVjdGRkluVVNsY1U2UjIySHVGV2tlODFiVDBLNTRseAo1cHpoZzd2TGRJbjc2VGhJN3VqRHA0UjZpaHl4dW5Udk8vcDh6MndISUUwQ3ZRWjNpUFVXcFBIWmdSMFZHVk42Cm1YKzduTWlFQWdpL2VZUDVPYkc1MnVnVUQzWEZLWmY4QXZWNzJ6OENnWUVBa1dLd3FTNWFwU1htWm1iRnBkYXEKM082MUJMeUxaaDBuL0o2YmpjZERPelJuWDRHL09YVG1XQ3lhVThBamJkYlpaVWlFQksrKzBLRGl0ajBsVGkwTgpSbkhFZVZISWpER28yWFpFd0JEWWJCb2tZSzdRUXo2S3B1MXZ2NTFYbjlRQ1dJbXVsbFV0VGczVzkvaFRieXAzClBmUEFtRnpadVZBTUdybEJwbGRCbkNNQ2dZQktaR3pwei9KTjQ4aEJPTWMxUlIyK1lhNU9DUTd3aXQvSVNIejAKRzRwV1V3Z2hhZVhtSDVLb1dHQ2F6VkpvYzR5QWN1eGEvUGhtZ2hDdXlueG94NXhlamkzeVEyR1A4QzhVQmUyMwpZNXFtdHQrTCtubjVDSTJIVnlBVHEyRUdjYTZKMlJyRktodldFWUsxak03YmJpTEw5bW9OQ3o3NHdpL01jNEcwCkNmZnZ6UUtCZ0FuQ056cUFWM2JObXFFUUtqeU10S2R6bzU4Szhnc1JQWFlHaVdqNmlYYWtPMkcyeVNFaTdhYngKbUxLOXhua3NaTlg4Q3JrUzF0ZWg1QnV2Ymp4d2MvcmlnQy94dERMVG9rY3Iza2pweWhoNjNPTEFMMmNKRHhOUApqY01ma0szcVpTaUhyUEtWY2Vld0QwaDZlbmxRcUU2VFVSUURqcXgreEpOcnlCdS9TNzdvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
roles/bootstrap_cluster/handlers/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
# handlers file for bootstrap_cluster
roles/bootstrap_cluster/meta/main.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.9

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
  # List your role dependencies here, one per line. Be sure to remove the '[]' above,
  # if you add dependencies to this list.
roles/bootstrap_cluster/tasks/main.yml (new file, 382 lines)
@@ -0,0 +1,382 @@
---
# tasks file for bootstrap_cluster

# Tasks for bootstrapping the cluster

# Objectives:
# - Install a single control-plane Kubernetes cluster
# - Install a Pod network on the cluster so that your Pods can talk to each other

# Initializing your control-plane node (master)
# (Recommended) If you have plans to upgrade this single control-plane kubeadm cluster to high
# availability, you should specify the --control-plane-endpoint to set
# the shared endpoint for all control-plane nodes.
# Such an endpoint can be either a DNS name or an IP address of a load balancer.

# nginx LB IP = 192.168.50.117

################################################
##         Download and configure etcd        ##
################################################
# We must download the etcd binaries, place them in the relevant directories,
# and copy some certificates for etcd to use.

# Get etcd binaries:

# It was discovered that having firewalld enabled when launching the flannel pods kept the cluster from starting properly
#- name: Disable firewalld
#  service:
#    name: firewalld
#    state: stopped
#  tags:
#    - kubeadm_reset
#    - kubeadm_init

# Delete nodes
- name: Delete nodes
  shell: kubectl delete nodes --all
  when: "'masters' in group_names"
  ignore_errors: true
  tags:
    - delete_nodes
    - kubeadm_init

# Remove old iptables rules and the cni interface
- name: Remove old iptables rules and delete cni interface
  shell: "{{ item }}"
  loop:
    - iptables -F
    - iptables -t nat -F
    - iptables -t mangle -F
    - iptables -X
    - ip link set cni0 down
    - sudo brctl delbr cni0
  ignore_errors: true
  tags:
    - delete_nodes
    - kubeadm_init

# Set the iptables default policies to ACCEPT so the flannel and coredns pods can start
- name: iptables default policies need to be ACCEPT on all chains
  iptables:
    chain: '{{ item }}'
    policy: ACCEPT
  with_items:
    - INPUT
    - FORWARD
    - OUTPUT
  tags:
    - kubeadm_init

# When the issue above is encountered it is necessary to remove these files
- name: Clean up cluster, etcd and cni state
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/etcd
    - /home/k8sadmin/.kube
    - /root/.kube
    - /etc/cni/net.d
  tags:
    - kubeadm_reset
    - kubeadm_init


# Install and configure etcd
- name: Download etcd version
  get_url:
    url: https://github.com/etcd-io/etcd/releases/download/v3.4.15/etcd-v3.4.15-linux-arm64.tar.gz
    dest: /home/k8sadmin
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Untar the binaries
- name: Untar the binary
  unarchive:
    src: /home/k8sadmin/etcd-v3.4.15-linux-arm64.tar.gz
    dest: /home/k8sadmin
    remote_src: yes
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Copy the etcd binaries to the /usr/local/bin path
- name: Copy etcd-v3.4.15-linux-arm64/etcd* contents to /usr/local/bin
  copy:
    src: "{{ item.src }}"
    dest: /usr/local/bin
    remote_src: yes
    mode: '0755'
  with_items:
    - { src: /home/k8sadmin/etcd-v3.4.15-linux-arm64/etcd }
    - { src: /home/k8sadmin/etcd-v3.4.15-linux-arm64/etcdctl }
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Create extra directories for etcd
- name: Make /etc/etcd and /var/lib/etcd directories
  file:
    path: "{{ item.path }}"
    state: directory
  with_items:
    - { path: /etc/etcd }
    - { path: /var/lib/etcd }
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Copy certs
- name: Copy certificates and keys to /etc/etcd
  copy:
    src: "{{ item.src }}"
    dest: /etc/etcd
    remote_src: yes
  with_items:
    - { src: /home/k8sadmin/ca.pem }
    - { src: /home/k8sadmin/k8s-master.pem }
    - { src: /home/k8sadmin/k8smasterkey.pem }
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# The following steps configure the etcd daemon for systemd to start on startup

# Place a j2 template into /etc/systemd/system/etcd.service using variables
- name: Create systemd etcd service
  template:
    src: etcd.service.j2
    dest: /etc/systemd/system/etcd.service
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Daemon reload so systemd can use the new service
- name: Daemon reload so systemd can use the new service
  systemd:
    daemon_reload: yes
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Start and enable the etcd service
- name: Start and enable the etcd service
  service:
    name: etcd
    state: started
    enabled: yes
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init
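
# Optional sanity check (not run by this role): once etcd is up, cluster membership can be
# verified from a master with the v3 client, e.g.:
#   ETCDCTL_API=3 /usr/local/bin/etcdctl member list \
#     --endpoints=https://{{ k8smaster01_ip }}:2379 \
#     --cacert=/etc/etcd/ca.pem --cert=/etc/etcd/k8s-master.pem --key=/etc/etcd/k8smasterkey.pem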

################################################
##    Configure control plane and workers     ##
################################################

# Reset kubeadm
- name: Reset kubeadm
  shell: kubeadm reset --force
  tags:
    - kubeadm_reset
    - kubeadm_init
  when: "'workers' in group_names or 'masters' in group_names"

- name: Copy kubeconfig for initializing the cluster
  template:
    src: cluster.kubeconfig.j2
    dest: /home/k8sadmin/cluster.kubeconfig
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: kubeadm init
  shell: kubeadm init --config /home/k8sadmin/cluster.kubeconfig
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Create $HOME/.kube directory
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - /home/k8sadmin/.kube
    - /root/.kube
  when: "'masters' in group_names or 'workers' in group_names"
  tags:
    - kubeadm_init

- name: Copy admin config to k8sadmin
  copy:
    src: /etc/kubernetes/admin.conf
    dest: "{{ item }}"
    owner: k8sadmin
    group: k8sadmin
    remote_src: yes
  loop:
    - /home/k8sadmin/.kube/config
    - /root/.kube/config
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Gather admin.conf
  find:
    paths: /home/k8sadmin/.kube/
    recurse: no
    patterns: "config"
  register: files_to_copy
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Fetch admin.conf to ansible controller
  fetch:
    src: "{{ item.path }}"
    dest: roles/bootstrap_cluster/files/
    flat: yes
  with_items: "{{ files_to_copy.files }}"
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Distribute admin.conf to workers
  copy:
    src: "{{ item.src }}"
    dest: "/home/k8sadmin/.kube/config"
    owner: k8sadmin
    group: k8sadmin
  with_items:
    - { src: config }
  when: "'workers' in group_names"
  tags:
    - kubeadm_init

# Create a join token and a hash of the CA, and parse the token and hash into their respective variables
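
# For reference, `kubeadm token create --print-join-command` writes a single line of the form
# (illustrative values):
#   kubeadm join 192.168.50.117:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:<hash>
# which is why the awk calls below pull field $5 for the token and field $7 for the hash.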
- name: Create token and hash and parse them out
  shell: kubeadm token create --print-join-command > join.txt
  #register: results
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Parse join file for token and create token variable
  shell: "cat join.txt | awk '{ print $5 }'"
  register: token
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Create token var
  set_fact:
    token: "{{ token.stdout }}"
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Output token variable
  debug:
    var: token
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Parse join file for hash and create hash variable
  shell: "cat join.txt | awk '{ print $7 }'"
  register: hash
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Create hash var
  set_fact:
    hash: "{{ hash.stdout }}"
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Output hash variable
  debug:
    var: hash
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Add token and hash to a dummy host to pass facts between hosts
  add_host:
    name: "192.168.50.240"
    token: "{{ token }}"
    hash: "{{ hash }}"
  tags:
    - kubeadm_init
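
# The dummy host above is only a place to park the token and hash facts; the worker
# join task further down reads them back through hostvars['192.168.50.240'].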

# Copy kube-flannel.yml config to host
- name: Copy kube-flannel.yml to host
  template:
    src: kube-flannel.j2
    dest: /home/k8sadmin/kube-flannel.yml
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

# Apply the pod network with flannel
- name: Apply flannel
  shell: kubectl apply -f /home/k8sadmin/kube-flannel.yml
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
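
# Optional check (not performed by this role): once the manifest is applied, the flannel
# pods can be watched with e.g. `kubectl get pods -n kube-system -l app=flannel`.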

# Join worker nodes

- name: Join worker nodes to cluster
  shell: |
    kubeadm join 192.168.50.117:6443 \
    --token "{{ hostvars['192.168.50.240']['token'] }}" \
    --discovery-token-ca-cert-hash "{{ hostvars['192.168.50.240']['hash'] }}"
  when: "'workers' in group_names"
  tags:
    - kubeadm_init
    - join

- name: Sleep for 1 minute to give pods time to come up
  wait_for:
    timeout: 60
  delegate_to: localhost
  tags:
    - kubeadm_init

- name: Restart containerd in case cni0 did not get created
  service:
    name: containerd
    state: restarted
  when: "'workers' in group_names"
  tags:
    - kubeadm_init

# Join control nodes
#- name: Join other control nodes to cluster
#  shell: |
#    kubeadm join 192.168.50.117:6443 \
#    --token {{ token.stdout }} \
#    --discovery-token-ca-cert-hash sha256:0ea3240343360022ebe06d56dc4d993ff9087c2a2910c7a238c95416596582f7 \
#    --control-plane

#kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml
roles/bootstrap_cluster/templates/cluster.kubeconfig.j2 (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: kubeadm.k8s.io/v1beta2
kubernetesVersion: 1.21.0
kind: ClusterConfiguration
controlPlaneEndpoint: "192.168.50.117:6443"
apiServer:
  extraArgs:
    advertise-address: 192.168.50.240
    encryption-provider-config: /etc/pki/encryption-config.yaml
etcd:
  external:
    endpoints:
    - https://192.168.50.240:2379
    caFile: /etc/etcd/ca.pem
    certFile: /etc/etcd/k8s-master.pem
    keyFile: /etc/etcd/k8smasterkey.pem
networking:
  podSubnet: 10.240.0.0/16
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    runtime-cgroups: /system.slice/containerd.service
    kubelet-cgroups: /systemd/system.slice
    container-runtime: remote
    container-runtime-endpoint: unix:///run/containerd/containerd.sock
    cgroup-driver: systemd
  criSocket: /var/run/containerd/containerd.sock
roles/bootstrap_cluster/templates/etcd.service.j2 (new file, 29 lines)
@@ -0,0 +1,29 @@
[Unit]
Description=etcd
Documentation=https://github.com/etcd-io/etcd/releases/tag/v3.4.15

[Service]
Environment="ETCD_UNSUPPORTED_ARCH=arm64"
ExecStart=/usr/local/bin/etcd \
  --name {{ k8smaster01_hostname }} \
  --cert-file=/etc/etcd/k8s-master.pem \
  --key-file=/etc/etcd/k8smasterkey.pem \
  --peer-cert-file=/etc/etcd/k8s-master.pem \
  --peer-key-file=/etc/etcd/k8smasterkey.pem \
  --trusted-ca-file=/etc/etcd/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --initial-advertise-peer-urls https://{{ k8smaster01_ip }}:2380 \
  --listen-peer-urls https://{{ k8smaster01_ip }}:2380 \
  --listen-client-urls https://{{ k8smaster01_ip }}:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://{{ k8smaster01_ip }}:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster {{ cluster }} \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
roles/bootstrap_cluster/templates/kube-flannel.j2 (new file, 223 lines)
@@ -0,0 +1,223 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.240.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
roles/bootstrap_cluster/tests/inventory (new file, 2 lines)
@@ -0,0 +1,2 @@
localhost
roles/bootstrap_cluster/tests/test.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - bootstrap_cluster
roles/bootstrap_cluster/vars/main.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
---
# vars file for bootstrap_cluster
# IP addresses of masters
k8smaster01_ip: 192.168.50.240
#k8smaster02_ip: 192.168.50.202


# Master hostnames
k8smaster01_hostname: k8smaster01
#k8smaster02_hostname: k8smaster02


cluster: k8smaster01=https://192.168.50.240:2380
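
# etcd's --initial-cluster flag takes a comma-separated list of name=peer-url pairs, so a
# hypothetical two-master setup would set something like:
# cluster: k8smaster01=https://192.168.50.240:2380,k8smaster02=https://192.168.50.202:2380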