Update
29
roles/bootstrap_cluster/.travis.yml
Normal file
@@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
    - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
34
roles/bootstrap_cluster/README.md
Normal file
@@ -0,0 +1,34 @@
Bootstrap Cluster
=========

This role does the following:

- Cleans up old kubeadm configs and resets the cluster on all nodes
- Creates etcd for use with multiple master/controller nodes
- Initializes the cluster on the master node
- Distributes the $HOME/.kube/config to all nodes
- Creates and parses out token and hash variables for dynamic kubeadm join commands on cluster nodes (see the sketch after this list)
- Joins other nodes to the cluster
- Installs flannel CNI
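
A minimal sketch of how the join command is derived (this mirrors the awk parsing in tasks/main.yml; the IP is this repo's load balancer address):

```
kubeadm token create --print-join-command > join.txt
TOKEN=$(awk '{ print $5 }' join.txt)
HASH=$(awk '{ print $7 }' join.txt)
kubeadm join 192.168.50.117:6443 --token "$TOKEN" --discovery-token-ca-cert-hash "$HASH"
```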


Requirements
------------
- Variables: Edit the variables for the etcd template in the vars directory
- The ability to connect to the internet, or a flannel.yaml file available on an air-gapped network
- An account with sudo privileges

Known issues
--------------
- Flannel pods stuck in an Error and/or Crash state: this was due to the api-server not being reachable from the flannel pods. Kube-proxy was not creating iptables rules. The only way to get around this was to disable firewalld, add multiple ALLOW policies in the kubeadm_install role, and remove all cluster configurations.


License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/bootstrap_cluster/defaults/main.yml
Normal file
@@ -0,0 +1,2 @@
---
# defaults file for bootstrap_cluster
19
roles/bootstrap_cluster/files/config
Normal file
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1emNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1EUXlNakU0TXpBek1Wb1hEVE14TURReU1ERTRNekF6TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHNoClg1RHk5aXdZeFB1UGhaUWs3Y2JqR0NjWTlld2dMSHA2TGVsYnJKT2hjOGw5eHYwUnNxOVhVd3MzVFNHMTFuUjMKdVdHemdQa0Z0TGZjdlNCRFhXQnExZWQ5NnRmamlRbVZIczEvWlhvUmZKczlteGVQcE1FNTAxN1lkNXJneFFucAorQmlxWGJYNkdycUxuV0VLQk5SNGFHU0ZHSmFWR1pDZ0JlYTVPNUFRS1JoZmhhaUdoTVdzQXVuRmM3dTVXRGw1CkdXZkpuZUd1bDVaTnNwOU81VkdhNERFamU3NFc0algwT08rTDVaaVFicHZoNmNUMUJOT1lhd1F1Q01hTEhvMUQKOVhXWmZyYlZ2VE1EMFZJOFZHekdxMSt2cGIxUVlwR2FNalNENkF1dGtaZ2JqcjNpeVJjUHM4ZTV5d0NaRmdFSAp1NWp1VDdwKytQb29Qc2dQU1hrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZGb3VSb2JFcHR4TnJLYU9iZzFOK3E1aGIvbXRNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCRkphemVkUlREcDN1aEJ6UnJ3NW0zNDBlc0VxR3p2eHVWcmF3bldEQWI2MUpISzBrbApMNit5VDlyeVNSR3JOanVkQnZXWlRrdHdBRytuczgvbWtnekRud2lIQ29qYk1odHNKN0ZNRDFTN3kwMDZqK0hQClVEclhud0hkWlhOdWNVR2pJc2VOckJjd3RGVytNMmYwNUIyaTNrazh6R1IvQzdpYndnZ3JNYmdjaGVZeDd4dEYKOW5hZ3VQM294REpZbzI4bGpzOWtDSmJTT1hDeTNBYXk5ZitUdEh3dFN6Uk1SaENkYVZuc2JPVjloaHA2dDBhSgpRRXRvaTBaODg2b24xYkNjSUNmdlZXTkpHREJZanVPTmNrWFQvM2MwenM1SmdMU3ZLL2s1U0RYUWNaOFpPc2V2Ck1tSWNhN2grQVNHM2VtNys0R0JtQ253YzhldWd1TGFHTEJjUQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://192.168.50.117:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJQ3FjSTVHVlZoOXd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TVRBME1qSXhPRE13TXpGYUZ3MHlNakEwTWpJeE9ETXdNelZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTJvV3A3Z2krVzNwYkVCMSsKVmx1MjZIcytuTU9vOWdtQ0JJT3lZcVZKN0JuK3F5eHl4N0R0cFRYWEJ4S1ExNHZkLzhTNE0wZGlMZUFwSW5OSgo0SjU2eFZVTmRHSnZrV1VrbklneHl1ejR0QVl4cHFyS1dwT29pWUtOZXlXY1JablRXTXhEK3BBV0o3TGJtYmJLCmlXdU5udkhlNG9PNVhUT2JaSDZBcXg0S2tzU3VWblJpa2YrSmwzd0NnL3hkMS9pYUI1K2tXQ3VwNTJ5a3F6cVEKb1dnOFZpT0JidzROb29seDZVb0gxN0N2L1QzbVdHWUp4Q0FyWHFFSlVJNWR3ZjJOZkxaUGZQbXREMlc3Q0lIQQptZjdGU2lCMFpsSlo4L2ZOVy9MTE9ITk03ZGEwY3FGNVlYZ01VM2YxK3g4TFNBeVJmK0swSFJaN1l3MG85YXFsCm5sR2tOUUlEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSYUxrYUd4S2JjVGF5bWptNE5UZnF1WVcvNQpyVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBcktSbmtpdWVZblB0K0NlOHVhMjhmQXY4YS94RnJOZDdhN2ZrCm1pZXNqM0hRWHNlWDdwN0llcmhhQlhheXBpdGFHNzJoSnBSb2s4SWVwUGJiRE1NS2hzLytESkUxcmVWNmRtOHMKdUdpQkZWY2tTWWtoWUg2RjZ0eWtBRnlCZDZPU0lCZDg4RlNmdmZxczhWSi8ySThiWCt2RjlnT0l6dFZoSTVTNApOVXlpcTZtdXBkL2RYZmFibzJKaXRyT0YraWd0bTdXTEg2UUtBSDl0MW1KVW5qUU5ZWWZHRFhRUnVNdEY5QTF2CkhPYzZpM3E0TjNyWEdsa0wzb0psRlhuck9HWkdCMnZXUjFXRWdTS01iNUVtbnpvRjlwOHF1Y2s4SVdiOWZiSEUKaDR3MURtckVMUFk2TlM1a0xnMzZwbTVDMWJlUmVnbnp2Vld0Ky9JcXpqbzROOWtwdUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMm9XcDdnaStXM3BiRUIxK1ZsdTI2SHMrbk1PbzlnbUNCSU95WXFWSjdCbitxeXh5Cng3RHRwVFhYQnhLUTE0dmQvOFM0TTBkaUxlQXBJbk5KNEo1NnhWVU5kR0p2a1dVa25JZ3h5dXo0dEFZeHBxcksKV3BPb2lZS05leVdjUlpuVFdNeEQrcEFXSjdMYm1iYktpV3VObnZIZTRvTzVYVE9iWkg2QXF4NEtrc1N1Vm5SaQprZitKbDN3Q2cveGQxL2lhQjUra1dDdXA1MnlrcXpxUW9XZzhWaU9CYnc0Tm9vbHg2VW9IMTdDdi9UM21XR1lKCnhDQXJYcUVKVUk1ZHdmMk5mTFpQZlBtdEQyVzdDSUhBbWY3RlNpQjBabEpaOC9mTlcvTExPSE5NN2RhMGNxRjUKWVhnTVUzZjEreDhMU0F5UmYrSzBIUlo3WXcwbzlhcWxubEdrTlFJREFRQUJBb0lCQUVRZCtGdVA5QzYxMUY4UQpvb291NnJSTGZyZ0ZNYzBJSjdSYWpTZTgyRzgxcHpJYWwranhtUkt2MXNpSW5BZmt2SjMyNTJoc3J3a1g0QnF5CkQyeHFXbURhNDJmTkszUUtNRUJ2SC81ZW9pUVQ3alJDOXZNSEpqay9MZlVlUXpsYSt3dXFHT0twT1k3RWJUbmMKUmdSU040STlhVG40ejdaaTJPU1pubWc0NUJqZGpQQS9JK2xwSmNWZ0h0cFBTOFRYYnFoZ2VUMTd6SXVUNWJOZgpnam9lemZvOFRodGowdmliNFgwZ09Cc2hDaGlXWXFVSFlWR2I3Tm5MYVhKZUpvOUp4SllMdkZRYXIxU3V5L0lhCndDZ2lDZDNQR0pMZTk2NHJBRnRISVpQRDRKcUhPTmhiVmFOR0g2OFRReGRsZVZYZ0orRkVyY2hiSFB3M3kwQzUKWUNnVWwzMENnWUVBOGVYemdlOENmNzJGd0NWT1lQVlFNU1VOVTQ3d2dPV3l0WXFweHMvSjFvdVZEQjJNNnl1dQpLd2dIcFR2di82YWQ4ajg0YlpPR0doeTFvdGFrWTJiNk8xdXc5bjVFWDBzeWFML21KZWF4NnRlTzRRTS8wUmZzClZHbkdDOWxKRFkvRmlUdCt4THRINGl6K29KNVlNUjd5R2N1cjM4WGlUL2N5T1pSVjNhYVZKNHNDZ1lFQTUwTFoKN0JpSHlCNWlPYXIwaVp2dDRGOWJDNU9sck5GWnJheGZwVjdGRkluVVNsY1U2UjIySHVGV2tlODFiVDBLNTRseAo1cHpoZzd2TGRJbjc2VGhJN3VqRHA0UjZpaHl4dW5Udk8vcDh6MndISUUwQ3ZRWjNpUFVXcFBIWmdSMFZHVk42Cm1YKzduTWlFQWdpL2VZUDVPYkc1MnVnVUQzWEZLWmY4QXZWNzJ6OENnWUVBa1dLd3FTNWFwU1htWm1iRnBkYXEKM082MUJMeUxaaDBuL0o2YmpjZERPelJuWDRHL09YVG1XQ3lhVThBamJkYlpaVWlFQksrKzBLRGl0ajBsVGkwTgpSbkhFZVZISWpER28yWFpFd0JEWWJCb2tZSzdRUXo2S3B1MXZ2NTFYbjlRQ1dJbXVsbFV0VGczVzkvaFRieXAzClBmUEFtRnpadVZBTUdybEJwbGRCbkNNQ2dZQktaR3pwei9KTjQ4aEJPTWMxUlIyK1lhNU9DUTd3aXQvSVNIejAKRzRwV1V3Z2hhZVhtSDVLb1dHQ2F6VkpvYzR5QWN1eGEvUGhtZ2hDdXlueG94NXhlamkzeVEyR1A4QzhVQmUyMwpZNXFtdHQrTCtubjVDSTJIVnlBVHEyRUdjYTZKMlJyRktodldFWUsxak03YmJpTEw5bW9OQ3o3NHdpL01jNEcwCkNmZnZ6UUtCZ0FuQ056cUFWM2JObXFFUUtqeU10S2R6bzU4Szhnc1JQWFlHaVdqNmlYYWtPMkcyeVNFaTdhYngKbUxLOXhua3NaTlg4Q3JrUzF0ZWg1QnV2Ymp4d2MvcmlnQy94dERMVG9rY3Iza2pweWhoNjNPTEFMMmNKRHhOUApqY01ma0szcVpTaUhyUEtWY2Vld0QwaDZlbmxRcUU2VFVSUURqcXgreEpOcnlCdS9TNzdvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
2
roles/bootstrap_cluster/handlers/main.yml
Normal file
@@ -0,0 +1,2 @@
---
# handlers file for bootstrap_cluster
53
roles/bootstrap_cluster/meta/main.yml
Normal file
@@ -0,0 +1,53 @@
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.9

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

382
roles/bootstrap_cluster/tasks/main.yml
Normal file
@@ -0,0 +1,382 @@
---
# tasks file for bootstrap_cluster

# Tasks for bootstrapping the cluster

# Objectives:
# - Install a single control-plane Kubernetes cluster
# - Install a Pod network on the cluster so that your Pods can talk to each other

# Initializing your control-plane node (MASTER)
# (Recommended) If you have plans to upgrade this single control-plane kubeadm cluster to high
# availability, you should specify --control-plane-endpoint to set
# the shared endpoint for all control-plane nodes.
# Such an endpoint can be either a DNS name or an IP address of a load-balancer

# nginx LB IP = 192.168.50.117

################################################
##        Download and configure etcd         ##
################################################
# We must download the etcd binaries, place them in the relevant directories,
# and copy some certificates for etcd to use

# Get etcd binaries:

# It was discovered that by having firewalld enabled when launching flannel pods, the cluster did not start properly
#- name: Disable firewalld
#  service:
#    name: firewalld
#    state: stopped
#  tags:
#    - kubeadm_reset
#    - kubeadm_init

# Delete nodes
- name: Delete nodes
  shell: kubectl delete nodes --all
  when: "'masters' in group_names"
  ignore_errors: true
  tags:
    - delete_nodes
    - kubeadm_init

# Remove old iptables rules and cni interface
- name: Remove old iptables rules and delete cni interface
  shell: "{{ item }}"
  loop:
    - iptables -F
    - iptables -t nat -F
    - iptables -t mangle -F
    - iptables -X
    - ip link set cni0 down
    - sudo brctl delbr cni0
  ignore_errors: true
  tags:
    - delete_nodes
    - kubeadm_init

# Configure iptables so the flannel and coredns pods can start and add their own rules
- name: iptables default policies need to be ACCEPT on all chains
  iptables:
    chain: '{{ item }}'
    policy: ACCEPT
  with_items:
    - INPUT
    - FORWARD
    - OUTPUT
  tags:
    - kubeadm_init

# When the issue above is encountered, it is necessary to remove these files
- name: Clean up cluster, etcd, and cni
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/etcd
    - /home/k8sadmin/.kube
    - /root/.kube
    - /etc/cni/net.d
  tags:
    - kubeadm_reset
    - kubeadm_init


# Install and configure etcd
- name: Download etcd v3.4.15
  get_url:
    url: https://github.com/etcd-io/etcd/releases/download/v3.4.15/etcd-v3.4.15-linux-arm64.tar.gz
    dest: /home/k8sadmin
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Untar the binaries
- name: Untar the binary
  unarchive:
    src: /home/k8sadmin/etcd-v3.4.15-linux-arm64.tar.gz
    dest: /home/k8sadmin
    remote_src: yes
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Copy the etcd binaries to the /usr/local/bin path
- name: Move etcd-v3.4.15-linux-arm64/etcd* contents to /usr/local/bin
  copy:
    src: "{{ item.src }}"
    dest: /usr/local/bin
    remote_src: yes
    mode: '0755'
  with_items:
    - { src: /home/k8sadmin/etcd-v3.4.15-linux-arm64/etcd }
    - { src: /home/k8sadmin/etcd-v3.4.15-linux-arm64/etcdctl }
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Create extra directories for etcd
- name: Make /etc/etcd and /var/lib/etcd directories
  file:
    path: "{{ item.path }}"
    state: directory
  with_items:
    - { path: /etc/etcd }
    - { path: /var/lib/etcd }
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Copy certs
- name: Copy certificates and keys to /etc/etcd
  copy:
    src: "{{ item.src }}"
    dest: /etc/etcd
    remote_src: yes
  with_items:
    - { src: /home/k8sadmin/ca.pem }
    - { src: /home/k8sadmin/k8s-master.pem }
    - { src: /home/k8sadmin/k8smasterkey.pem }
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# The following steps configure the etcd daemon for systemd to start on startup

# Place a j2 template into /etc/systemd/system/etcd.service using variables
- name: Create systemd etcd service
  template:
    src: etcd.service.j2
    dest: /etc/systemd/system/etcd.service
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Daemon reload so systemd can use the new service
- name: Daemon reload so systemd can use the new service
  systemd:
    daemon_reload: yes
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

# Start and enable the etcd service
- name: Start and enable the etcd service
  service:
    name: etcd
    state: started
    enabled: yes
  when: "'masters' in group_names"
  tags:
    - etcd
    - kubeadm_init

################################################
##    Configure control plane and workers     ##
################################################

# Reset kubeadm
- name: Reset kubeadm
  shell: kubeadm reset --force
  tags:
    - kubeadm_reset
    - kubeadm_init
  when: "'workers' in group_names or 'masters' in group_names"

- name: Copy kubeconfig for initializing the cluster
  template:
    src: cluster.kubeconfig.j2
    dest: /home/k8sadmin/cluster.kubeconfig
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: kubeadm init
  shell: kubeadm init --config /home/k8sadmin/cluster.kubeconfig
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Create $HOME/.kube directory
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - /home/k8sadmin/.kube
    - /root/.kube
  when: "'masters' in group_names or 'workers' in group_names"
  tags:
    - kubeadm_init

- name: Copy admin config to k8sadmin
  copy:
    src: /etc/kubernetes/admin.conf
    dest: "{{ item }}"
    owner: k8sadmin
    group: k8sadmin
    remote_src: yes
  loop:
    - /home/k8sadmin/.kube/config
    - /root/.kube/config
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Gather admin.conf
  find:
    paths: /home/k8sadmin/.kube/
    recurse: no
    patterns: "config"
  register: files_to_copy
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Fetch admin.conf to ansible controller
  fetch:
    src: "{{ item.path }}"
    dest: roles/bootstrap_cluster/files/
    flat: yes
  with_items: "{{ files_to_copy.files }}"
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

- name: Distribute admin.conf to workers
  copy:
    src: "{{ item.src }}"
    dest: "/home/k8sadmin/.kube/config"
    owner: k8sadmin
    group: k8sadmin
  with_items:
    - { src: config }
  when: "'workers' in group_names"
  tags:
    - kubeadm_init

# Create a join token and a hash of the CA, then parse the token and hash into their respective variables
- name: Create token and hash and parse them out
  shell: kubeadm token create --print-join-command > join.txt
  #register: results
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token
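
# join.txt now holds one line shaped like (illustrative values, not real ones):
#   kubeadm join 192.168.50.117:6443 --token abcdef.0123456789abcdef \
#     --discovery-token-ca-cert-hash sha256:<64-hex-digest>
# so awk field $5 is the token and field $7 is the sha256 hash parsed below.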

- name: Parse join file for token and create token variable
  shell: "cat join.txt | awk '{ print $5 }'"
  register: token
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Create token var
  set_fact:
    token: "{{ token.stdout }}"
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Output token variable
  debug:
    var: token
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Parse join file for hash and create hash variable
  shell: "cat join.txt | awk '{ print $7 }'"
  register: hash
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Create hash var
  set_fact:
    hash: "{{ hash.stdout }}"
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Output hash variable
  debug:
    var: hash
  when: "'masters' in group_names"
  tags:
    - kubeadm_init
    - token

- name: Add token and hash to dummy host to pass facts between hosts
  add_host:
    name: "192.168.50.240"
    token: "{{ token }}"
    hash: "{{ hash }}"
  tags:
    - kubeadm_init
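
# The dummy-host pattern above makes the token and hash reachable from every
# other host in the play: facts attached to the ad-hoc "192.168.50.240" entry
# can be read anywhere via hostvars['192.168.50.240'], which is how the worker
# join task below picks them up.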

# Copy kube-flannel.yml config to host
- name: Copy kube-flannel.yml to host
  template:
    src: kube-flannel.j2
    dest: /home/k8sadmin/kube-flannel.yml
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

# Apply pod network with Flannel
- name: Apply flannel
  shell: kubectl apply -f /home/k8sadmin/kube-flannel.yml
  when: "'masters' in group_names"
  tags:
    - kubeadm_init

# Join worker nodes
- name: Join worker nodes to cluster
  shell: |
    kubeadm join 192.168.50.117:6443 \
    --token "{{ hostvars['192.168.50.240']['token'] }}" \
    --discovery-token-ca-cert-hash "{{ hostvars['192.168.50.240']['hash'] }}"
  when: "'workers' in group_names"
  tags:
    - kubeadm_init
    - join

- name: Sleep for 1 minute to give pods time to come up
  wait_for:
    timeout: 60
  delegate_to: localhost
  tags:
    - kubeadm_init

- name: Restart containerd in case cni0 didn't get created
  service:
    name: containerd
    state: restarted
  when: "'workers' in group_names"
  tags:
    - kubeadm_init

# Join control nodes
#- name: Join other control nodes to cluster
#  shell: |
#    kubeadm join 192.168.50.117:6443 \
#    --token {{ token.stdout }} \
#    --discovery-token-ca-cert-hash sha256:0ea3240343360022ebe06d56dc4d993ff9087c2a2910c7a238c95416596582f7 \
#    --control-plane

#kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml
28
roles/bootstrap_cluster/templates/cluster.kubeconfig.j2
Normal file
@@ -0,0 +1,28 @@
apiVersion: kubeadm.k8s.io/v1beta2
kubernetesVersion: 1.21.0
kind: ClusterConfiguration
controlPlaneEndpoint: "192.168.50.117:6443"
apiServer:
  extraArgs:
    advertise-address: 192.168.50.240
    encryption-provider-config: /etc/pki/encryption-config.yaml
etcd:
  external:
    endpoints:
      - https://192.168.50.240:2379
    caFile: /etc/etcd/ca.pem
    certFile: /etc/etcd/k8s-master.pem
    keyFile: /etc/etcd/k8smasterkey.pem
networking:
  podSubnet: 10.240.0.0/16
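# NOTE: podSubnet must match the "Network" value in kube-flannel.j2's
# net-conf.json (10.240.0.0/16 in this repo), or flannel will hand out
# pod addresses the control plane does not expect.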
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    runtime-cgroups: /system.slice/containerd.service
    kubelet-cgroups: /systemd/system.slice
    container-runtime: remote
    container-runtime-endpoint: unix:///run/containerd/containerd.sock
    cgroup-driver: systemd
  criSocket: /var/run/containerd/containerd.sock
29
roles/bootstrap_cluster/templates/etcd.service.j2
Normal file
@@ -0,0 +1,29 @@
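# Rendered by Ansible's template module; the k8smaster01_hostname,
# k8smaster01_ip, and cluster variables come from roles/bootstrap_cluster/vars/main.yml.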
[Unit]
Description=etcd
Documentation=https://github.com/etcd-io/etcd/releases/tag/v3.4.15

[Service]
Environment="ETCD_UNSUPPORTED_ARCH=arm64"
ExecStart=/usr/local/bin/etcd \
  --name {{ k8smaster01_hostname }} \
  --cert-file=/etc/etcd/k8s-master.pem \
  --key-file=/etc/etcd/k8smasterkey.pem \
  --peer-cert-file=/etc/etcd/k8s-master.pem \
  --peer-key-file=/etc/etcd/k8smasterkey.pem \
  --trusted-ca-file=/etc/etcd/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --initial-advertise-peer-urls https://{{ k8smaster01_ip }}:2380 \
  --listen-peer-urls https://{{ k8smaster01_ip }}:2380 \
  --listen-client-urls https://{{ k8smaster01_ip }}:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://{{ k8smaster01_ip }}:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster {{ cluster }} \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
223
roles/bootstrap_cluster/templates/kube-flannel.j2
Normal file
@@ -0,0 +1,223 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.240.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
2
roles/bootstrap_cluster/tests/inventory
Normal file
@@ -0,0 +1,2 @@
localhost

5
roles/bootstrap_cluster/tests/test.yml
Normal file
@@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - bootstrap_cluster
13
roles/bootstrap_cluster/vars/main.yml
Normal file
@@ -0,0 +1,13 @@
---
# vars file for bootstrap_cluster
# IP addresses of masters
k8smaster01_ip: 192.168.50.240
#k8smaster02_ip: 192.168.50.202


# Master hostnames
k8smaster01_hostname: k8smaster01
#k8smaster02_hostname: k8smaster02


cluster: k8smaster01=https://192.168.50.240:2380
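# To add more masters later, extend this comma-separated etcd initial-cluster
# list of name=peer-url pairs, e.g. (hypothetical second master):
#   cluster: k8smaster01=https://192.168.50.240:2380,k8smaster02=https://192.168.50.202:2380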
29
roles/certificates/.travis.yml
Normal file
@@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
    - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
100
roles/certificates/README.md
Normal file
@@ -0,0 +1,100 @@
Certificates
=========
# .GITIGNORE WARNING

## THIS ROLE WILL PUT ALL THE KEYS AND CERTS IT CREATES ONTO THE CONTROL NODE IN THE ROLE'S /files DIRECTORY. PLEASE ADD THAT DIRECTORY TO YOUR .GITIGNORE SO YOU DO NOT UPLOAD YOUR KEYS AND CERTS TO GITHUB
-------------
Useful links:

- [Adding your own CA trusted to firefox](https://javorszky.co.uk/2019/11/06/get-firefox-to-trust-your-self-signed-certificates/)
- [Adding your own CA to Debian host](https://unix.stackexchange.com/questions/90450/adding-a-self-signed-certificate-to-the-trusted-list)
-------------
Documentation

How to apply OpenSSL extensions:
https://www.openssl.org/docs/man1.0.2/man5/x509v3_config.html

Ansible modules:
- https://docs.ansible.com/ansible/2.7/modules/openssl_certificate_module.html
- https://docs.ansible.com/ansible/2.4/openssl_csr_module.html
- https://docs.ansible.com/ansible/2.5/modules/openssl_privatekey_module.html

-------------
Errors I Encountered

When generating some files I was getting:
- "error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/home/user/.rnd"

The fix was to comment out "RANDFILE = $ENV::HOME/.rnd" in /etc/ssl/openssl.cnf

I also got this:
- "error:0D07A097:asn1 encoding routines:ASN1_mbstring_ncopy:string too long:../crypto/asn1/a_mbstr.c:107:maxsize=2"

If you see "maxsize=#" in the error, it means you had more characters than allowed in a field. In my case I had more than 2 characters in the Country field.
--------------
Role to create certificates:

- Create a CA
- Create keys, certificate signing requests, and certificates
- Fetch files from the host you configured these on TO the Ansible control node
- Distribute certificates based on requirements

Manual Commands to match this playbook
-------------
These assume you're running with sudo.

Install openssl:
- apt-get install openssl

Create the CA private key
- openssl genrsa -out ca-key.pem 2048

Create CA csr
Creating openssl certs and CSRs requires configuration to be passed in for certain items like extensions. You can either create a .cfg file and pass it into the openssl command, or put the configuration in a CONFIG variable in the bash shell and then echo that variable.
```
CONFIG="
distinguished_name = my_req_distinguished_name
req_extensions = my_extensions
prompt = no
[ my_req_distinguished_name ]
C = US
ST = State
L = City
O = kubernetes
CN = kubernetes
[ my_extensions ]
basicConstraints=critical,CA:TRUE
keyUsage=critical, cRLSign, keyCertSign
"
```
- openssl req -config <(echo "$CONFIG") -new -key ca-key.pem -out ca.csr

To view the CSR so you can verify it has all the right options you want:
- openssl req -text -noout -verify -in ca.csr

Create the CA cert
- openssl req -new -key ca-key.pem -in ca.csr -x509 -days 1000 -out ca.pem
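
To inspect the finished certificate (a standard openssl check, not part of the role):
- openssl x509 -text -noout -in ca.pem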

You will repeat these steps (creating a key, CSR, and cert) over and over. HOWEVER, the options in the $CONFIG variable will change depending on what the cert is for. CA:TRUE will only be applied for the CA; everything else will get CA:FALSE. Pay attention to key_usage and extended_key_usage.

Documentation for openssl extensions can be found at:
https://www.openssl.org/docs/man1.0.2/man5/x509v3_config.html




Requirements
------------

- A sudo user on the hosts you wish to apply this to
- An internet connection, or openssl and its required dependencies already installed


License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/certificates/defaults/main.yml
Normal file
@@ -0,0 +1,2 @@
---
# defaults file for certificates
2
roles/certificates/handlers/main.yml
Normal file
@@ -0,0 +1,2 @@
---
# handlers file for certificates
53
roles/certificates/meta/main.yml
Normal file
@@ -0,0 +1,53 @@
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.9

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

618
roles/certificates/tasks/main.yml
Normal file
@@ -0,0 +1,618 @@
---
# tasks file for certificates

# Tasks to create a CA and certificates for the Kubernetes cluster
# The CA will be my NFS server host in this use case.

# I did find that, using the openssl_csr module, the key_usage option did not like a
# comma-separated list, but the YAML list did work.

# I got an error:
# "Cannot parse Subject Alternative Name \" IP:192.168.50.240\" (potentially unsupported by cryptography backend)"
# This was due to spaces between the comma-separated values in the subject_alt_name option of the openssl_csr module
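# For illustration:
#   wrong: subject_alt_name: "DNS:localhost, IP:192.168.50.240"
#   right: subject_alt_name: "DNS:localhost,IP:192.168.50.240"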


##########################################
##              CREATE CA               ##
##########################################
# Create a directory to store certs
- name: Create certs directory for storing CA stuff
  file:
    path: '{{ CA_DIR }}'
    state: directory
  tags:
    - certificates
    - ca
  when: inventory_hostname == groups['management'][0]

# Create the CA private key
- name: Generate CA private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/ca-key.pem'
  tags:
    - certificates
    - ca
  when: inventory_hostname == groups['management'][0]

# Create a CSR for the CA
# Any CA cert must have the keyCertSign usage option
- name: Generate CA CSR
  openssl_csr:
    path: '{{ CA_DIR }}/ca.csr'
    privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    basic_constraints: 'CA:TRUE'
    basic_constraints_critical: True
    key_usage:
      - cRLSign
      - keyCertSign
    key_usage_critical: True
    organizational_unit_name: kubernetes
    common_name: kubernetes
  tags:
    - certificates
    - ca
  when: inventory_hostname == groups['management'][0]

# Create the CA cert from the CSR
- name: Generate CA certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/ca.pem'
    privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    csr_path: '{{ CA_DIR }}/ca.csr'
    provider: selfsigned
  tags:
    - certificates
    - ca
  when: inventory_hostname == groups['management'][0]

##########################################
##           KUBE ADMIN CERTS           ##
##########################################

# Create the k8sadmin private key
- name: Generate Admin private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/admin-key.pem'
  tags:
    - certificates
    - admin_cert
  when: inventory_hostname == groups['management'][0]

# Create admin CSR
- name: Generate Admin CSR
  openssl_csr:
    path: '{{ CA_DIR }}/admin.csr'
    privatekey_path: '{{ CA_DIR }}/admin-key.pem'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    common_name: k8sadmin
    organization_name: "system:masters"
    organizational_unit_name: kubernetes
  tags:
    - certificates
    - admin_cert
  when: inventory_hostname == groups['management'][0]

# Create the Admin cert. Using the CSR created above and the ca.pem generated in the first tasks,
# we can generate the certificate for the admin
- name: Generate Admin certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/admin.pem'
    csr_path: '{{ CA_DIR }}/admin.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - admin_cert
  when: inventory_hostname == groups['management'][0]


##########################################
##             MASTER CERTS             ##
##########################################

# Create master keys
- name: Generate Masters private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/k8smasterkey.pem'
  tags:
    - certificates
    - master_cert
    - master
  when: inventory_hostname == groups['management'][0]

# localhost and 127.0.0.1 are added for k8s services on controller nodes to access the local k8s API
# kubernetes.default is added because it can be used from inside the cluster to access the API
# 10.32.0.1 is a well-known address used by services and pods in the cluster
# Create masters CSR
- name: Generate Masters CSR
  openssl_csr:
    path: '{{ CA_DIR }}/k8smaster.csr'
    privatekey_path: '{{ CA_DIR }}/k8smasterkey.pem'
    common_name: 'k8s-master'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    organization_name: 'system:masters'
    organizational_unit_name: 'kubernetes'
    subject_alt_name: "DNS:kubernetes.default,IP:{{ groups['masters'][0] }},DNS:{{ k8smaster01_hostname }},IP:{{ groups['load_balancers'][0] }},DNS:{{ load_balancer_hostname }},IP:127.0.0.1,DNS:localhost,IP:{{ APISERVER_SERVICE_IP }}"
  tags:
    - certificates
    - master_cert
    - master
  when: inventory_hostname == groups['management'][0]

# Create master cert using the master CSR and ca.pem
- name: Generate Masters certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/k8s-master.pem'
    csr_path: '{{ CA_DIR }}/k8smaster.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - master_cert
    - master
  when: inventory_hostname == groups['management'][0]

##########################################
##            KUBELET CERTS             ##
##########################################

# Create worker keys
- name: Generate Workers private keys
  openssl_privatekey:
    path: '{{ CA_DIR }}/{{ item }}-key.pem'
  loop:
    - "{{ k8sworker01_hostname }}"
    - "{{ k8sworker02_hostname }}"
    - "{{ k8sworker03_hostname }}"
  tags:
    - certificates
    - worker_cert
    - worker
  when: inventory_hostname == groups['management'][0]

# Create worker CSRs
- name: Generate Workers CSRs
  openssl_csr:
    path: '{{ CA_DIR }}/{{ item[0] }}.csr'
    privatekey_path: '{{ CA_DIR }}/{{ item[0] }}-key.pem'
    common_name: 'system:node:{{ item[0] }}'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    organization_name: 'system:nodes'
    organizational_unit_name: 'kubernetes'
    subject_alt_name: 'DNS:{{ item[0] }},IP:{{ item[1] }}'
  loop:
    - ["{{ k8sworker01_hostname }}", "{{ k8sworker01_ip }}"]
    - ["{{ k8sworker02_hostname }}", "{{ k8sworker02_ip }}"]
    - ["{{ k8sworker03_hostname }}", "{{ k8sworker03_ip }}"]
  tags:
    - certificates
    - worker_cert
    - worker
  when: inventory_hostname == groups['management'][0]


# Create worker certs
- name: Generate Workers certificates
  openssl_certificate:
    path: '{{ CA_DIR }}/{{ item }}.pem'
    csr_path: '{{ CA_DIR }}/{{ item }}.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  loop:
    - "{{ k8sworker01_hostname }}"
    - "{{ k8sworker02_hostname }}"
    - "{{ k8sworker03_hostname }}"
  tags:
    - certificates
    - worker_cert
    - worker
  when: inventory_hostname == groups['management'][0]

##########################################
##           KUBEPROXY CERTS            ##
##########################################

# Create kube-proxy key
- name: Generate Kube Proxy private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/kube-proxy-key.pem'
  tags:
    - certificates
    - kubeproxy_cert
    - kubeproxy
  when: inventory_hostname == groups['management'][0]

# Create kube-proxy CSR
- name: Generate Kube Proxy CSR
  openssl_csr:
    path: '{{ CA_DIR }}/kube-proxy.csr'
    privatekey_path: '{{ CA_DIR }}/kube-proxy-key.pem'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    common_name: 'system:kube-proxy'
    organization_name: 'system:node-proxier'
    organizational_unit_name: 'kubernetes'
  tags:
    - certificates
    - kubeproxy_cert
    - kubeproxy
  when: inventory_hostname == groups['management'][0]

# Create kube-proxy cert
- name: Generate Kube Proxy certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/kube-proxy.pem'
    csr_path: '{{ CA_DIR }}/kube-proxy.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - kubeproxy_cert
    - kubeproxy
  when: inventory_hostname == groups['management'][0]

##########################################
##         KUBE SCHEDULER CERTS         ##
##########################################

# Create kube-scheduler key
- name: Generate Kube scheduler private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/kube-scheduler-key.pem'
  tags:
    - certificates
    - kubescheduler_cert
    - kubescheduler
  when: inventory_hostname == groups['management'][0]

# Create kube-scheduler CSR
- name: Generate Kube scheduler CSR
  openssl_csr:
    path: '{{ CA_DIR }}/kube-scheduler.csr'
    privatekey_path: '{{ CA_DIR }}/kube-scheduler-key.pem'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    common_name: 'system:kube-scheduler'
    organization_name: 'system:kube-scheduler'
    organizational_unit_name: 'kubernetes'
  tags:
    - certificates
    - kubescheduler_cert
    - kubescheduler
  when: inventory_hostname == groups['management'][0]

# Create kube-scheduler cert
- name: Generate Kube scheduler certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/kube-scheduler.pem'
    csr_path: '{{ CA_DIR }}/kube-scheduler.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - kubescheduler_cert
    - kubescheduler
  when: inventory_hostname == groups['management'][0]

##########################################
##    KUBE CONTROLLER MANAGER CERTS     ##
##########################################

# Create kube-controller-manager key
- name: Generate Kube controller-manager private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/kube-controller-manager-key.pem'
  tags:
    - certificates
    - kubecontroller_cert
    - kubecontroller
  when: inventory_hostname == groups['management'][0]

# Create kube-controller-manager CSR
- name: Generate Kube controller-manager CSR
  openssl_csr:
    path: '{{ CA_DIR }}/kube-controller-manager.csr'
    privatekey_path: '{{ CA_DIR }}/kube-controller-manager-key.pem'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    common_name: 'system:kube-controller-manager'
    organization_name: 'system:kube-controller-manager'
    organizational_unit_name: 'kubernetes'
  tags:
    - certificates
    - kubecontroller_cert
    - kubecontroller
  when: inventory_hostname == groups['management'][0]

# Create kube-controller-manager cert
- name: Generate Kube controller-manager certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/kube-controller-manager.pem'
    csr_path: '{{ CA_DIR }}/kube-controller-manager.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - kubecontroller_cert
    - kubecontroller
  when: inventory_hostname == groups['management'][0]

##########################################
##    CREATE SERVICE ACCOUNT KEY PAIR   ##
##########################################

# This certificate is used to sign service account tokens

# Create service-account key
- name: Generate service-account private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/service-account-key.pem'
  tags:
    - certificates
    - serviceaccount_cert
    - serviceaccount
  when: inventory_hostname == groups['management'][0]

# Create service-account CSR
- name: Generate service-account CSR
  openssl_csr:
    path: '{{ CA_DIR }}/service-account.csr'
    privatekey_path: '{{ CA_DIR }}/service-account-key.pem'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    common_name: 'service-accounts'
    organization_name: 'kubernetes'
    organizational_unit_name: 'kubernetes'
  tags:
    - certificates
    - serviceaccount_cert
    - serviceaccount
  when: inventory_hostname == groups['management'][0]

# Create service-account cert
- name: Generate service-account certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/service-account.pem'
    csr_path: '{{ CA_DIR }}/service-account.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - serviceaccount_cert
    - serviceaccount
  when: inventory_hostname == groups['management'][0]

##########################################
##         KUBE DASHBOARD CERTS         ##
##########################################

# Create dashboard key
- name: Generate k8s Dashboard private key
  openssl_privatekey:
    path: '{{ CA_DIR }}/k8s-dashboard-key.pem'
  tags:
    - certificates
    - kubedashboard_cert
    - kubedashboard
  when: inventory_hostname == groups['management'][0]

# Create dashboard CSR
- name: Generate k8s Dashboard CSR
  openssl_csr:
    path: '{{ CA_DIR }}/k8s-dashboard.csr'
    privatekey_path: '{{ CA_DIR }}/k8s-dashboard-key.pem'
    basic_constraints: "CA:FALSE"
    basic_constraints_critical: True
    key_usage:
      - digitalSignature
      - keyEncipherment
    key_usage_critical: True
    extended_key_usage:
      - serverAuth
      - clientAuth
    common_name: 'k8s-Dashboard'
    organization_name: 'addons:Dashboard'
    organizational_unit_name: 'kubernetes'
  tags:
    - certificates
    - kubedashboard_cert
    - kubedashboard
  when: inventory_hostname == groups['management'][0]

# Create dashboard cert
- name: Generate k8s Dashboard certificate
  openssl_certificate:
    path: '{{ CA_DIR }}/k8s-dashboard.pem'
    csr_path: '{{ CA_DIR }}/k8s-dashboard.csr'
    ownca_path: '{{ CA_DIR }}/ca.pem'
    ownca_privatekey_path: '{{ CA_DIR }}/ca-key.pem'
    provider: ownca
  tags:
    - certificates
    - kubedashboard_cert
    - kubedashboard
  when: inventory_hostname == groups['management'][0]

# Create cert bundle for the dashboard
- name: Generate k8s-dashboard bundle
  shell: "cat {{ CA_DIR }}/k8s-dashboard.pem {{ CA_DIR }}/k8s-dashboard-key.pem > {{ CA_DIR }}/k8s-dashboard.bundle"
  args:
    creates: '{{ CA_DIR }}/k8s-dashboard.bundle'
  tags:
    - certificates
    - kubedashboard_cert
    - kubedashboard
  when: inventory_hostname == groups['management'][0]
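
# The bundle above is simply the certificate followed by its private key in one
# PEM file; some TLS consumers (e.g. dashboard or ingress options that take a
# single combined file) expect that form.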
|
||||
|
||||
# Create encryption-config
|
||||
#- name: Generate encryption-config.yml
|
||||
# shell: echo "{{ encryption_config }}" > {{ CA_DIR }}/encryption-config.yml
|
||||
# args:
|
||||
# creates: '{{ CA_DIR }}/encryption-config.yml'
|
||||
# tags:
|
||||
# - certficates
|
||||
# - kubedashboard_cert
|
||||
# - kubedashboard
|
||||
# when: inventory_hostname == groups['management'][0]
|
||||
|
||||
##########################################
|
||||
## GATHER CERTS FOR DISTRIBUTION ##
|
||||
##########################################
|
||||
##########################################
|
||||
## WARNING: ADD THIS ROLES /FILES ##
|
||||
## DIRECTORY TO YOUR .GITIGNORE ##
|
||||
## OR EVERYONE WILL ##
|
||||
# HAVE YOUR CERTS ##
|
||||
##########################################
|
||||
- name: Gather the cert files to be fetched
|
||||
find:
|
||||
paths: /root/k8scerts
|
||||
recurse: no
|
||||
patterns: "*"
|
||||
register: files_to_copy
|
||||
tags:
|
||||
- certificates
|
||||
- fetch
|
||||
- distribute
|
||||
when: inventory_hostname == groups['management'][0]
|
||||
|
||||
- name: Fetch certs from CA and place them into this roles file directory
|
||||
fetch:
|
||||
src: "{{ item.path }}"
|
||||
dest: roles/certificates/files/
|
||||
flat: yes
|
||||
with_items: "{{ files_to_copy.files }}"
|
||||
tags:
|
||||
- certificates
|
||||
- fetch
|
||||
- distribute
|
||||
when: inventory_hostname == groups['management'][0]
|
||||

- name: Distribute worker01 certs
  copy:
    src: "{{ item.src }}"
    dest: "/home/k8sadmin"
  with_items:
    - { src: ca.pem }
    - { src: k8sworker01-key.pem }
    - { src: k8sworker01.pem }
    - { src: kube-proxy.pem }
    - { src: kube-proxy-key.pem }
  tags:
    - certificates
    - distribute
  #when: inventory_hostname == groups['workers'][0]
  when: ansible_hostname == 'k8sworker01'

- name: Distribute worker02 certs
  copy:
    src: "{{ item.src }}"
    dest: "/home/k8sadmin"
  with_items:
    - { src: ca.pem }
    - { src: k8sworker02-key.pem }
    - { src: k8sworker02.pem }
    - { src: kube-proxy.pem }
    - { src: kube-proxy-key.pem }
  tags:
    - certificates
    - distribute
  #when: inventory_hostname == groups['workers'][1]
  when: ansible_hostname == 'k8sworker02'

- name: Distribute worker03 certs
  copy:
    src: "{{ item.src }}"
    dest: "/home/k8sadmin"
  with_items:
    - { src: ca.pem }
    - { src: k8sworker03-key.pem }
    - { src: k8sworker03.pem }
    - { src: kube-proxy.pem }
    - { src: kube-proxy-key.pem }
  tags:
    - certificates
    - distribute
  #when: inventory_hostname == groups['workers'][2]
  when: ansible_hostname == 'k8sworker03'

- name: Distribute master01 certs
  copy:
    src: "{{ item.src }}"
    dest: "/home/k8sadmin"
  with_items:
    - { src: ca.pem }
    - { src: ca-key.pem }
    - { src: k8smasterkey.pem }
    - { src: k8s-master.pem }
    - { src: service-account-key.pem }
    - { src: service-account.pem }
    - { src: kube-controller-manager-key.pem }
    - { src: kube-controller-manager.pem }
    - { src: kube-scheduler-key.pem }
    - { src: kube-scheduler.pem }
    - { src: admin-key.pem }
    - { src: admin.pem }
  tags:
    - certificates
    - distribute
  when: inventory_hostname == groups['masters'][0]
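
# NOTE: the three per-worker distribution tasks above are identical except for
# the hostname baked into the file names. A minimal consolidated sketch
# (assuming every worker's certs are named after its hostname, as they are here):
#- name: Distribute worker certs
#  copy:
#    src: "{{ item }}"
#    dest: /home/k8sadmin
#  with_items:
#    - ca.pem
#    - "{{ ansible_hostname }}-key.pem"
#    - "{{ ansible_hostname }}.pem"
#    - kube-proxy.pem
#    - kube-proxy-key.pem
#  when: "'workers' in group_names"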
2
roles/certificates/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/certificates/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - certificates

28
roles/certificates/vars/main.yml
Normal file
@ -0,0 +1,28 @@
---
# vars file for certificates

# The directory on the CA host where all the keys, CSRs, and certificates will be stored
CA_DIR: /root/k8scerts

# Well-known IP used internally by the k8s cluster
APISERVER_SERVICE_IP: 10.32.0.1

# The hostnames of the 3 worker nodes in the cluster
k8sworker01_hostname: k8sworker01
k8sworker02_hostname: k8sworker02
k8sworker03_hostname: k8sworker03

# The IPs of the 3 worker nodes in the cluster
k8sworker01_ip: 192.168.50.177
k8sworker02_ip: 192.168.50.202
k8sworker03_ip: 192.168.50.30

# Load balancer hostname to add to the list of names for the controller/master cert
load_balancer_hostname: k8sbalancer01
load_balancer_ip: 192.168.50.117

# Controller/master hostname to add to the list of names for the controller/master cert
k8smaster01_hostname: k8smaster01
k8smaster01_ip: 192.168.50.240

#encryption_config
83
roles/configure_hosts/README.md
Normal file
@ -0,0 +1,83 @@
Configure Hosts
=========
Role to configure day one bootstrapping of hosts including:

- hostnames
- /etc/hosts file
- Add an administrator user with sudo abilities
- Change the root password
- Distribute SSH keys to hosts
- Change the login banner
- Lock the ubuntu account

Manual Commands to match this playbook
-------------
These assume you're running sudo. The hostname, hosts file, and user will all need to be set on each machine you want them on.

To set a hostname:
- hostnamectl set-hostname <hostname>

To edit /etc/hosts:
- vi /etc/hosts
  * Use "i" to enter insert mode and use the arrow keys to move around
  * Hit "Esc" to exit insert mode and type ":wq" to write and quit the file

To change the root password:
- passwd root

To add a user:
- useradd k8sadmin -c "kubernetes admin" -s /bin/bash

To add a user to the sudo group:
- usermod -aG sudo k8sadmin

To change the password for the user:
- passwd k8sadmin

To make the user's home directory:
- mkdir /home/k8sadmin && chown k8sadmin:k8sadmin /home/k8sadmin

To lock the ubuntu account:
- usermod -L ubuntu

To create ssh keys for the user:
- ssh-keygen (follow the prompts or hit "Enter" 3 times)

To edit the login banner:
- vi /etc/ssh/sshd_config
- Change the "#Banner none" line to "Banner /etc/issue"
- Save the file
- systemctl restart sshd
- vi /etc/issue
- Paste whatever you want

This one only needs to be done from the machine you will manage all of the others from.

To copy your ssh keys to the other hosts:
- ssh-copy-id k8sadmin@k8sworker01 (do this for each host)

Encrypting passwords
------------

* Create vault.pass in the playbook directory with a password that will be used to encrypt and decrypt with ansible vault
* Create a .gitignore file and place the name of the vault.pass file in it
* vi /etc/ansible/ansible.cfg and change the "vault_password_file = /home/user/kubernetes/Kubernetes-Home-Lab/pass.vault" line to match your vault.pass file path
* mkpasswd --method=SHA-512 (copy the hashed password when you're done with this command)
* Run "ansible-vault encrypt_string 'hashed_password_to_encrypt' --name 'root_password'" (with vault_password_file set in ansible.cfg you do not need to pass "--vault-password-file" on the command line); see the example below
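
The encrypted output can be pasted straight into the role's vars file and referenced like any other variable. A minimal sketch (ciphertext elided):

    root_password: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        ...

    - name: Change the root password
      user:
        name: root
        update_password: always
        password: "{{ root_password }}"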

Requirements
------------

- A sudo user on the hosts you wish to apply this to


License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/configure_hosts/defaults/main.yml
Normal file
@ -0,0 +1,2 @@
---
# defaults file for configure_hosts

2
roles/configure_hosts/handlers/main.yml
Normal file
@ -0,0 +1,2 @@
---
# handlers file for configure_hosts
57
roles/configure_hosts/meta/main.yml
Normal file
57
roles/configure_hosts/meta/main.yml
Normal file
@ -0,0 +1,57 @@
|
||||
galaxy_info:
|
||||
author: your name
|
||||
description: your description
|
||||
company: your company (optional)
|
||||
|
||||
# If the issue tracker for your role is not on github, uncomment the
|
||||
# next line and provide a value
|
||||
# issue_tracker_url: http://example.com/issue/tracker
|
||||
|
||||
# Some suggested licenses:
|
||||
# - BSD (default)
|
||||
# - MIT
|
||||
# - GPLv2
|
||||
# - GPLv3
|
||||
# - Apache
|
||||
# - CC-BY
|
||||
license: license (GPLv2, CC-BY, etc)
|
||||
|
||||
min_ansible_version: 1.2
|
||||
|
||||
# If this a Container Enabled role, provide the minimum Ansible Container version.
|
||||
# min_ansible_container_version:
|
||||
|
||||
# Optionally specify the branch Galaxy will use when accessing the GitHub
|
||||
# repo for this role. During role install, if no tags are available,
|
||||
# Galaxy will use this branch. During import Galaxy will access files on
|
||||
# this branch. If Travis integration is configured, only notifications for this
|
||||
# branch will be accepted. Otherwise, in all cases, the repo's default branch
|
||||
# (usually master) will be used.
|
||||
#github_branch:
|
||||
|
||||
#
|
||||
# platforms is a list of platforms, and each platform has a name and a list of versions.
|
||||
#
|
||||
# platforms:
|
||||
# - name: Fedora
|
||||
# versions:
|
||||
# - all
|
||||
# - 25
|
||||
# - name: SomePlatform
|
||||
# versions:
|
||||
# - all
|
||||
# - 1.0
|
||||
# - 7
|
||||
# - 99.99
|
||||
|
||||
galaxy_tags: []
|
||||
# List tags for your role here, one per line. A tag is a keyword that describes
|
||||
# and categorizes the role. Users find roles by searching for tags. Be sure to
|
||||
# remove the '[]' above, if you add tags to this list.
|
||||
#
|
||||
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
|
||||
# Maximum 20 tags per role.
|
||||
|
||||
dependencies: []
|
||||
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
|
||||
# if you add dependencies to this list.
|
126
roles/configure_hosts/tasks/main.yml
Normal file
@ -0,0 +1,126 @@
---
# tasks file for configure_hosts


# Assign worker nodes hostnames
- name: Assign hostname for worker 01
  hostname:
    name: "{{ k8s_worker_01 }}"
    use: systemd
  when: inventory_hostname == '192.168.50.177'
  tags:
    - worker
    - ip_address

- name: Assign hostname for worker 02
  hostname:
    name: "{{ k8s_worker_02 }}"
  when: inventory_hostname == '192.168.50.202'
  tags:
    - worker
    - ip_address

- name: Assign hostname for worker 03
  hostname:
    name: "{{ k8s_worker_03 }}"
  when: inventory_hostname == '192.168.50.30'
  tags:
    - worker
    - ip_address

# Assign API Master Server Hostname
- name: Assign hostname for K8s Master
  hostname:
    name: "{{ k8s_master_01 }}"
  when: inventory_hostname == '192.168.50.240'
  tags:
    - master
    - ip_address

# Assign Load Balancer Hostname
- name: Assign hostname for Load Balancer
  hostname:
    name: "{{ k8s_balancer_01 }}"
  when: inventory_hostname == '192.168.50.117'
  tags:
    - load
    - ip_address


# Assign NFS/TFTP Server Hostname
- name: Assign hostname for management server
  hostname:
    name: "{{ management_01 }}"
  when: inventory_hostname == '192.168.50.113'
  tags:
    - management
    - ip_address
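
# NOTE: the five hardcoded-IP tasks above could collapse into one. A minimal
# sketch, assuming a hypothetical node_hostnames dict in vars/main.yml that
# maps each inventory IP to its hostname:
#- name: Assign hostnames
#  hostname:
#    name: "{{ node_hostnames[inventory_hostname] }}"
#  when: inventory_hostname in node_hostnames
#  tags:
#    - ip_address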

# Copy /etc/hosts file
- name: Copy /etc/hosts
  template:
    src: hosts.j2
    dest: /etc/hosts
  tags:
    - hosts

# Change the root password
- name: Change the root password
  user:
    name: root
    update_password: always
    password: "{{ root_password }}"
  tags:
    - manage_users

# Create Kubernetes Admin
- name: Add k8sadmin to cluster
  user:
    name: "{{ kubernetes_admin }}"
    comment: Kubernetes Admin
    shell: /bin/bash
    password: "{{ k8s_admin_password }}"
    groups: sudo
    append: yes
    create_home: yes
    generate_ssh_key: yes
    ssh_key_bits: 2048
    ssh_key_file: .ssh/id_rsa
  tags:
    - manage_users

# Lock the default ubuntu account
- name: Lock the default ubuntu account
  user:
    name: ubuntu
    password_lock: yes
  tags:
    - manage_users

# Change the login banner
- name: Change the login banner
  template:
    src: issue.j2
    dest: /etc/issue
  tags:
    - banner

# Change SSH login banner path
- name: Change Banner option in /etc/ssh/sshd_config
  replace:
    path: /etc/ssh/sshd_config
    regexp: '#Banner none'
    replace: 'Banner /etc/issue'
  tags:
    - banner

# Restart sshd service
- name: Restart sshd service
  service:
    name: sshd
    state: restarted
  tags:
    - banner
23
roles/configure_hosts/templates/hosts.j2
Normal file
@ -0,0 +1,23 @@
127.0.0.1 localhost
127.0.1.1 ubuntu

# Workers
{{ worker_address_01 }} {{ k8s_worker_01 }}
{{ worker_address_02 }} {{ k8s_worker_02 }}
{{ worker_address_03 }} {{ k8s_worker_03 }}

# Masters
{{ master_address_01 }} {{ k8s_master_01 }}

# Load Balancer
{{ balancer_address_01 }} {{ k8s_balancer_01 }}

# Management
{{ management_address_01 }} {{ management_01 }}

# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

8
roles/configure_hosts/templates/issue.j2
Normal file
@ -0,0 +1,8 @@
Welcome and please do not do illegal stuff!

      ●
  /\__\__/\
 /          \
\(ミ ⌒ ● ⌒ ミ)/ ★KUPO★

★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★

2
roles/configure_hosts/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/configure_hosts/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - configure_hosts
75
roles/configure_hosts/vars/main.yml
Normal file
@ -0,0 +1,75 @@
---
# vars file for configure_hosts
#########################################
#### Begin Hostnames and IP Addresses ####
# Worker Node Addresses

# 8GB RAM / 64GB Storage
worker_address_01: 10.0.4.102
k8s_worker_01: kworker-001
# 8GB RAM / 64GB Storage
worker_address_02: 10.0.4.103
k8s_worker_02: kworker-002
# 8GB RAM / 64GB Storage
worker_address_03: 10.0.4.104
k8s_worker_03: kworker-003
# 8GB RAM / 64GB Storage
worker_address_04: 10.0.4.105
k8s_worker_04: kworker-004
# 8GB RAM / 64GB Storage
worker_address_05: 10.0.4.106
k8s_worker_05: kworker-005
# 8GB RAM / 64GB Storage
worker_address_06: 10.0.4.107
k8s_worker_06: kworker-006
# 8GB RAM / 64GB Storage
worker_address_07: 10.0.4.108
k8s_worker_07: kworker-007

#########################################
# Master Node Addresses
# 8GB RAM / 64GB Storage
master_address_01: 10.0.4.101
k8s_master_01: kmaster-01

#########################################
# Load Balancer Addresses
# 8GB RAM / 64GB Storage
balancer_address_01: 10.0.4.100
k8s_balancer_01: kbalancer-01


#########################################
# NFS/TFTP - Other Management Addresses
# 8GB RAM / 64GB Storage
management_address_01: 192.168.50.113
management_01: management01

#### End Hostnames and IP Addresses ####
#### Begin Usernames and Passwords ####
root_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35343631313338656635383933306363653966343263346432383062643362393265663861623336
  3761353061353832396139373238666139393635653636360a306463633831313833323264623930
  33376138666235636264336436336239653732616334326564396333353539393238313032613335
  3633396462636135380a363332623263623231663930386536626239316161366434376438646163
  30616466333436633939306237333731313232623534623633653862636465636632623034646239
  62666662303539373638626566313931626433383361313265316236323132363766356339343635
  38666132363737343438336335643039343465376136376461313434613434383166653238386538
  62393131393131356638613562396237623235633636353137333531326636326335353566373132
  39616233356163623532363161366266393333633263393362626263373665653035
kubernetes_admin: k8sadmin
k8s_admin_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  64343530616230663338343238323235636538393062636434386234393134666439316332613666
  6365323463313235653630613366383933373764643136360a353631633465393739343530383234
  33366537373131336335333566333535623134663565643064633763616466396436643930313033
  6136613330323065650a663532616463363537333164323432616335303438656534663534353239
  64303966633764636462376231353934663633623363656634353435303565333837376166366366
  64376165613261656664393635316232306632383363353866373765373362666631353031343966
  38613831636631656631313765373636373134376331386566333133363030366535643861623564
  34633032333065303031613133303664356335363262613330626333653939323332326332363830
  37636236663365336463663533363735366463363237653436343361313631376365


#### End Usernames and Passwords ####
29
roles/kubeadm_install/.travis.yml
Normal file
@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
      - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
123
roles/kubeadm_install/README.md
Normal file
@ -0,0 +1,123 @@
Kubeadm Install
=========
Role to configure prerequisites for installing a Kubeadm cluster

- Remove existing repos and gpg keys
- Open firewalld ports
- Disable swap
- Load modules and edit sysctl
- Install containerd
- Install kubelet, kubeadm, and kubectl

Manual Commands to match this playbook
-------------
These assume you're running sudo.

To ensure the gpg keys and repos are removed:
- rm -rf /etc/apt/sources.list.d/kubernetes.list
- rm -rf /usr/share/keyrings/kubernetes-archive-keyring.gpg
- rm -rf /etc/apt/sources.list.d/docker.list
- rm -rf /usr/share/keyrings/docker-archive-keyring.gpg


To open firewalld ports, restart, and enable firewalld (run the --add-port= command for each port, or loop over them as shown below):
- firewall-cmd --permanent --add-port=6443/tcp
- systemctl restart firewalld
- systemctl enable firewalld
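
A quick way to open the whole master set in one pass (a sketch; trim the list per node role):

    for p in 6443/tcp 2379-2380/tcp 10250-10252/tcp 8285/udp 8472/tcp 8080/tcp; do
        sudo firewall-cmd --permanent --add-port=$p
    done
    sudo firewall-cmd --reload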
To disable swap:
- swapoff -a
- Edit /etc/fstab
  * Comment out the swap line

To check if br_netfilter and overlay modules are loaded and load them:
- lsmod | grep br_netfilter (if nothing is output, it's not loaded)
  * modprobe br_netfilter
- lsmod | grep overlay
  * modprobe overlay

Add modules to a modules-load.d config
- vi /etc/modules-load.d/k8s.conf
- Add the below to the file
  * overlay
  * br_netfilter
- hit ESC and type :wq to save and quit

Add sysctl configs to /etc/sysctl.d
- vi /etc/sysctl.d/k8s.conf
- Add the below lines to the file
  * net.bridge.bridge-nf-call-ip6tables = 1
  * net.bridge.bridge-nf-call-iptables = 1
  * net.ipv4.ip_forward = 1
- hit ESC and type :wq to save and quit

To apply the sysctl changes now type:
- sysctl --system

To install required packages to install containerd
- apt-get install apt-transport-https ca-certificates curl gnupg lsb-release

Add docker official gpg key
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg

Setup Stable docker repository
- echo \
  "deb [arch=arm64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

Update repo lists
- apt-get update

Install containerd
- apt-get install containerd.io

Make /etc/containerd directory
- mkdir /etc/containerd

Set containerd config default
- containerd config default | sudo tee /etc/containerd/config.toml

Restart containerd
- systemctl restart containerd

Add lines to the end of /etc/containerd/config.toml
- vi /etc/containerd/config.toml
  * [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  * [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  * SystemdCgroup = true
- hit ESC and type :wq to save and quit

Restart containerd
- systemctl restart containerd

Download google cloud GPG key
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg

Setup kubernetes repository
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list

Update repo lists
- apt-get update

To install kubeadm, kubectl, and kubelet
- apt-get install kubeadm kubectl kubelet
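
After installing, it is common practice (though not part of this role) to pin the versions so routine apt upgrades don't move the cluster unexpectedly:

    sudo apt-mark hold kubelet kubeadm kubectl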

Requirements
------------

- A sudo user on the hosts you wish to apply this to
- An internet connection


License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
|
2
roles/kubeadm_install/defaults/main.yml
Normal file
2
roles/kubeadm_install/defaults/main.yml
Normal file
@ -0,0 +1,2 @@
|
||||
---
|
||||
# defaults file for kubeadm_install
|
2
roles/kubeadm_install/handlers/main.yml
Normal file
2
roles/kubeadm_install/handlers/main.yml
Normal file
@ -0,0 +1,2 @@
|
||||
---
|
||||
# handlers file for kubeadm_install
|
53
roles/kubeadm_install/meta/main.yml
Normal file
53
roles/kubeadm_install/meta/main.yml
Normal file
@ -0,0 +1,53 @@
|
||||
galaxy_info:
|
||||
author: your name
|
||||
description: your role description
|
||||
company: your company (optional)
|
||||
|
||||
# If the issue tracker for your role is not on github, uncomment the
|
||||
# next line and provide a value
|
||||
# issue_tracker_url: http://example.com/issue/tracker
|
||||
|
||||
# Choose a valid license ID from https://spdx.org - some suggested licenses:
|
||||
# - BSD-3-Clause (default)
|
||||
# - MIT
|
||||
# - GPL-2.0-or-later
|
||||
# - GPL-3.0-only
|
||||
# - Apache-2.0
|
||||
# - CC-BY-4.0
|
||||
license: license (GPL-2.0-or-later, MIT, etc)
|
||||
|
||||
min_ansible_version: 2.9
|
||||
|
||||
# If this a Container Enabled role, provide the minimum Ansible Container version.
|
||||
# min_ansible_container_version:
|
||||
|
||||
#
|
||||
# Provide a list of supported platforms, and for each platform a list of versions.
|
||||
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
|
||||
# To view available platforms and versions (or releases), visit:
|
||||
# https://galaxy.ansible.com/api/v1/platforms/
|
||||
#
|
||||
# platforms:
|
||||
# - name: Fedora
|
||||
# versions:
|
||||
# - all
|
||||
# - 25
|
||||
# - name: SomePlatform
|
||||
# versions:
|
||||
# - all
|
||||
# - 1.0
|
||||
# - 7
|
||||
# - 99.99
|
||||
|
||||
galaxy_tags: []
|
||||
# List tags for your role here, one per line. A tag is a keyword that describes
|
||||
# and categorizes the role. Users find roles by searching for tags. Be sure to
|
||||
# remove the '[]' above, if you add tags to this list.
|
||||
#
|
||||
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
|
||||
# Maximum 20 tags per role.
|
||||
|
||||
dependencies: []
|
||||
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
|
||||
# if you add dependencies to this list.
|
||||
|
255
roles/kubeadm_install/tasks/main.yml
Normal file
@ -0,0 +1,255 @@
---
# tasks file for kubeadm_install
###############################
###        Pre-reqs         ###
###############################

# Check whether these exist and remove them if they do; leaving them in place
# will cause issues on every run of this playbook after the first
- name: Remove existing gpg keys and repos to prevent issues
  file:
    path: "{{ item.path }}"
    state: absent
  with_items:
    - { path: /etc/apt/sources.list.d/kubernetes.list }
    - { path: /usr/share/keyrings/kubernetes-archive-keyring.gpg }
    - { path: /etc/apt/sources.list.d/docker.list }
    - { path: /usr/share/keyrings/docker-archive-keyring.gpg }

###############################
###   Open Firewalld Ports  ###
###############################

# Install Firewalld and netfilter-persistent
- name: Install firewalld and ( netfilter-persistent Debian only )
  apt:
    pkg:
      - firewalld
      - netfilter-persistent
    state: present
  tags:
    - firewalld
    - iptables

# Open Required Master Ports
- name: open ports ( MASTERS )
  firewalld:
    port: "{{ item.port }}"
    permanent: yes
    state: enabled
  with_items:
    - { port: 6443/tcp }
    - { port: 8285/udp }
    - { port: 8472/tcp }
    - { port: 8080/tcp }
    - { port: 2379-2380/tcp }
    - { port: 10250-10252/tcp }
  when: "'masters' in group_names"
  tags:
    - firewalld

# Open Required Worker Ports
- name: open ports ( WORKERS )
  firewalld:
    port: "{{ item.port }}"
    permanent: yes
    state: enabled
  with_items:
    - { port: 10250/tcp }
    - { port: 8285/udp }
    - { port: 8472/tcp }
    - { port: 8080/tcp }
    - { port: 30000-32767/tcp }
  when: "'workers' in group_names"
  tags:
    - firewalld
# Turn on and Enable Firewalld
- name: Turn on and enable firewalld
  service:
    name: firewalld
    state: restarted
    enabled: yes
  tags:
    - firewalld

# Configure iptables so flannel and coredns pods can start, and add iptables rules
- name: iptables default policies need to be ACCEPT on all chains
  iptables:
    chain: '{{ item }}'
    policy: ACCEPT
  with_items:
    - INPUT
    - FORWARD
    - OUTPUT
  tags:
    - iptables

- name: save iptables rules (Debian)
  shell: netfilter-persistent save
  tags:
    - iptables

#############################
###     Disable SWAP      ###
#############################

# Disable swap right now
- name: disable swap NOW
  shell: /usr/sbin/swapoff -a

# Use if you have swap in your /etc/fstab file to comment out the swap line for persistence
#- name: Disable swap persistently
#  command: sudo sed -i '/ swap / s/^/#/' /etc/fstab

##########################################
## LETTING IPTABLES SEE BRIDGED TRAFFIC ##
##########################################

# Load br_netfilter and overlay modules
- name: Load required modules
  modprobe:
    name: "{{ item.name }}"
    state: present
  with_items:
    - { name: br_netfilter }
    - { name: overlay }

# Create config to ensure modules are loaded on reboots
- name: Place k8s.conf in modules-load.d
  template:
    src: k8s_modules.conf.j2
    dest: /etc/modules-load.d/k8s.conf

# Ensure sysctl options are set to allow proper network operation
- name: Adding /etc/sysctl.d/k8s.conf
  template:
    src: k8s_sysctl.conf.j2
    dest: /etc/sysctl.d/k8s.conf

# Apply the sysctl changes made right now
- name: Apply sysctl changes
  command: /usr/sbin/sysctl --system

# Add cgroups to cmdline
- name: Add cgroups to cmdline
  template:
    src: cmdline.txt.j2
    dest: /boot/firmware/cmdline.txt
  register: task_result
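
# The reboot is fired asynchronously (async: 1, poll: 0) so Ansible does not
# block on the dying SSH connection; wait_for_connection then polls until the
# host is reachable again.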
- name: Reboot immediately if there was a change.
  shell: "sleep 5 && reboot"
  async: 1
  poll: 0
  when: task_result is changed

- name: Wait for the reboot to complete if there was a change.
  wait_for_connection:
    connect_timeout: 20
    sleep: 5
    delay: 5
    timeout: 300
  when: task_result is changed

#####################################
##       INSTALL CONTAINERD       ##
#####################################

# Install the required packages to perform the below operations
- name: Install required software to setup containerd install repo
  apt:
    pkg:
      - bridge-utils
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
      - lsb-release

# Add official docker repo gpg key
- name: Add docker official gpg key
  shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg

# Setup the repo file on the host
- name: Setup Stable docker repository
  template:
    src: docker.list.j2
    dest: /etc/apt/sources.list.d/docker.list

# Update the repo based on the new repolist added and install containerd
- name: Apt-get update and Install containerd
  apt:
    pkg:
      - containerd.io
    update_cache: yes

# Build the containerd config directory
- name: Make /etc/containerd directory
  file:
    path: /etc/containerd
    state: directory

# Generate the default containerd config (a pipe requires the shell module)
- name: Set containerd config default
  shell: containerd config default | sudo tee /etc/containerd/config.toml

# Restart containerd
- name: Restart and enable containerd
  service:
    name: containerd
    state: restarted
    enabled: yes

# Place the config file in the new config directory
- name: Place config.toml file
  template:
    src: config.toml.j2
    dest: /etc/containerd/config.toml

# Restart containerd AGAIN
- name: Restart and enable containerd
  service:
    name: containerd
    state: restarted
    enabled: yes

################################################
###   INSTALL KUBEADM, KUBELET, KUBECTL   ######
################################################

# Download google cloud GPG key
- name: Download the google cloud public signing GPG key
  shell: sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg

# setup kubernetes repo
- name: Setup kubernetes repository
  template:
    src: kubernetes.list.j2
    dest: /etc/apt/sources.list.d/kubernetes.list

# Install kubectl on all nodes in the lab
- name: Apt-get update and Install kubectl on entire lab
  apt:
    pkg:
      - kubectl
    update_cache: yes

# Install kubeadm on all k8s nodes
- name: Apt-get update and Install kubeadm on entire k8s cluster
  apt:
    pkg:
      - kubeadm
    update_cache: yes
  when: "'masters' in group_names or 'workers' in group_names"

# Install kubelet on the workers and masters
- name: Apt-get update and Install kubelet on workers and masters
  apt:
    pkg:
      - kubelet
    update_cache: yes
  when: "'workers' in group_names or 'masters' in group_names"

1
roles/kubeadm_install/templates/cmdline.txt.j2
Normal file
@ -0,0 +1 @@
net.ifnames=0 dwc_otg.lpm_enable=0 console=serial0,115200 console=tty1 root=LABEL=writable rootfstype=ext4 elevator=deadline rootwait fixrtc cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1

34
roles/kubeadm_install/templates/config.toml.j2
Normal file
@ -0,0 +1,34 @@
# Copyright 2018-2020 Docker Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#disabled_plugins = ["cri"]

#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0

#[grpc]
#  address = "/run/containerd/containerd.sock"
#  uid = 0
#  gid = 0

#[debug]
#  address = "/run/containerd/debug.sock"
#  uid = 0
#  gid = 0
#  level = "info"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true

1
roles/kubeadm_install/templates/docker.list.j2
Normal file
@ -0,0 +1 @@
deb [arch=arm64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu focal stable

5
roles/kubeadm_install/templates/k8s_modules.conf.j2
Normal file
@ -0,0 +1,5 @@
# Containerd Requirements
overlay

# Kubeadm Requirements
br_netfilter

3
roles/kubeadm_install/templates/k8s_sysctl.conf.j2
Normal file
@ -0,0 +1,3 @@
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

1
roles/kubeadm_install/templates/kubernetes.list.j2
Normal file
@ -0,0 +1 @@
deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main

2
roles/kubeadm_install/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/kubeadm_install/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - kubeadm_install

2
roles/kubeadm_install/vars/main.yml
Normal file
@ -0,0 +1,2 @@
---
# vars file for kubeadm_install
29
roles/kubeconfigs/.travis.yml
Normal file
@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
      - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
43
roles/kubeconfigs/README.md
Normal file
@ -0,0 +1,43 @@
Kubeconfigs
=========

Role to distribute the kubeconfig used for initializing the cluster

Requirements
------------

- kubeadm and all cluster components installed

Description
--------------

The cluster.kubeconfig.j2 file will be placed into the $HOME/.kube directory on the master node and used with the kubeadm init --config option to initialize the cluster.
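
For example (a sketch; the config path is where this role's tasks place the template):

    sudo kubeadm init --config /home/k8sadmin/cluster.kubeconfig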

- There is a file under this role's files directory for more complex creation of kubeconfigs.

Role Variables
--------------

Dependencies
------------

A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.

Example Playbook
----------------

Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:

    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/kubeconfigs/defaults/main.yml
Normal file
@ -0,0 +1,2 @@
---
# defaults file for kubeconfigs
229
roles/kubeconfigs/files/main_non-kubeadm.yml
Normal file
@ -0,0 +1,229 @@
---
# tasks file for kubeconfigs

# Creating the kubeconfig files
# kubectl is used to generate a .kubeconfig for each k8sworker node, in 3 parts:
# - "kubectl config set-cluster" to define the cluster
# - "kubectl config set-credentials" to define credentials with a username as the hostname
# - "kubectl config set-context" to set a context in the file, which is then
#   selected with "kubectl config use-context"

# Each kubeconfig is created on the appropriate host at the time the kubectl
# command is run. Therefore, you must have distributed the required key and certs
# to the nodes before running this role.
# worker node configs
|
||||
- name: Create worker kubeconfigs ( k8sworker01 )
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server={{load_balancer_address}}:6443 \
|
||||
--kubeconfig={{ worker01_hostname }}.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:node:{{ worker01_hostname }} \
|
||||
--client-certificate={{ worker01_hostname }}.pem \
|
||||
--client-key={{ worker01_hostname }}-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig={{ worker01_hostname }}.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=system:node:{{ worker01_hostname }} \
|
||||
--kubeconfig={{ worker01_hostname }}.kubeconfig
|
||||
when: ansible_hostname == "k8sworker01"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- worker
|
||||
|
||||
- name: Create worker kubeconfigs ( k8sworker02 )
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server={{load_balancer_address}}:6443 \
|
||||
--kubeconfig={{ worker02_hostname }}.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:node:{{ worker02_hostname }} \
|
||||
--client-certificate={{ worker02_hostname }}.pem \
|
||||
--client-key={{ worker02_hostname }}-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig={{ worker02_hostname }}.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=system:node:{{ worker02_hostname }} \
|
||||
--kubeconfig={{ worker02_hostname }}.kubeconfig
|
||||
when: ansible_hostname == "k8sworker02"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- worker
|
||||
|
||||
- name: Create worker kubeconfigs ( k8sworker03 )
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server={{load_balancer_address}}:6443 \
|
||||
--kubeconfig={{ worker03_hostname }}.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:node:{{ worker03_hostname }} \
|
||||
--client-certificate={{ worker03_hostname }}.pem \
|
||||
--client-key={{ worker03_hostname }}-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig={{ worker03_hostname }}.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=system:node:{{ worker03_hostname }} \
|
||||
--kubeconfig={{ worker03_hostname }}.kubeconfig
|
||||
when: ansible_hostname == "k8sworker03"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- worker
|
||||
|
||||
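
# NOTE: the three per-worker tasks above are identical except for the hostname.
# A minimal consolidated sketch (relying on ansible_hostname matching the
# workerNN_hostname vars used above):
#- name: Create worker kubeconfigs
#  shell: |
#    kubectl config set-cluster kubernetes \
#      --certificate-authority=ca.pem \
#      --embed-certs=true \
#      --server={{ load_balancer_address }}:6443 \
#      --kubeconfig={{ ansible_hostname }}.kubeconfig
#    kubectl config set-credentials system:node:{{ ansible_hostname }} \
#      --client-certificate={{ ansible_hostname }}.pem \
#      --client-key={{ ansible_hostname }}-key.pem \
#      --embed-certs=true \
#      --kubeconfig={{ ansible_hostname }}.kubeconfig
#    kubectl config set-context default \
#      --cluster=kubernetes \
#      --user=system:node:{{ ansible_hostname }} \
#      --kubeconfig={{ ansible_hostname }}.kubeconfig
#  when: "'workers' in group_names"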
# Kubeproxy config
|
||||
- name: Create Kubeproxy config
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server={{load_balancer_address}}:6443 \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-proxy \
|
||||
--client-certificate=kube-proxy.pem \
|
||||
--client-key=kube-proxy-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=system:kube-proxy \
|
||||
--kubeconfig=kube-proxy.kubeconfig
|
||||
when: "'workers' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- kubeproxy
|
||||
- worker
|
||||
|
||||
# Controller manager config
|
||||
- name: Create controller manager config
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-controller-manager \
|
||||
--client-certificate=kube-controller-manager.pem \
|
||||
--client-key=kube-controller-manager-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=system:kube-controller-manager \
|
||||
--kubeconfig=kube-controller-manager.kubeconfig
|
||||
when: "'masters' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- controller
|
||||
- master
|
||||
|
||||
# Scheduler config
|
||||
- name: Create scheduler config
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
kubectl config set-credentials system:kube-scheduler \
|
||||
--client-certificate=kube-scheduler.pem \
|
||||
--client-key=kube-scheduler-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=system:kube-scheduler \
|
||||
--kubeconfig=kube-scheduler.kubeconfig
|
||||
when: "'masters' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- scheduler
|
||||
- master
|
||||
|
||||
# admin config
|
||||
- name: Create admin config
|
||||
shell: |
|
||||
kubectl config set-cluster kubernetes \
|
||||
--certificate-authority=ca.pem \
|
||||
--embed-certs=true \
|
||||
--server=https://127.0.0.1:6443 \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
kubectl config set-credentials admin \
|
||||
--client-certificate=admin.pem \
|
||||
--client-key=admin-key.pem \
|
||||
--embed-certs=true \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
|
||||
kubectl config set-context default \
|
||||
--cluster=kubernetes \
|
||||
--user=admin \
|
||||
--kubeconfig=admin.kubeconfig
|
||||
when: "'masters' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- admin
|
||||
- master
|
||||
|
||||
|
||||
- name: Ensure default context is set ( proxy )
|
||||
shell: kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
|
||||
when: "'workers' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- worker
|
||||
- proxy
|
||||
- setcontext
|
||||
|
||||
- name: Ensure default context is set ( worker )
|
||||
shell: kubectl config use-context default --kubeconfig={{ ansible_hostname }}.kubeconfig
|
||||
when: "'workers' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- worker
|
||||
- setcontext
|
||||
|
||||
- name: Ensure default context is set ( admin )
|
||||
shell: kubectl config use-context default --kubeconfig=admin.kubeconfig
|
||||
when: "'masters' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- master
|
||||
- admin
|
||||
- setcontext
|
||||
|
||||
- name: Ensure default context is set ( controller )
|
||||
shell: kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
|
||||
when: "'masters' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- master
|
||||
- controller
|
||||
- setcontext
|
||||
|
||||
- name: Ensure default context is set ( scheduler )
|
||||
shell: kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
|
||||
when: "'masters' in group_names"
|
||||
tags:
|
||||
- kubeconfig
|
||||
- master
|
||||
- scheduler
|
||||
- setcontext
|
||||
|
2
roles/kubeconfigs/handlers/main.yml
Normal file
2
roles/kubeconfigs/handlers/main.yml
Normal file
@ -0,0 +1,2 @@
|
||||
---
|
||||
# handlers file for kubeconfigs
|
53
roles/kubeconfigs/meta/main.yml
Normal file
53
roles/kubeconfigs/meta/main.yml
Normal file
@ -0,0 +1,53 @@
|
||||
galaxy_info:
|
||||
author: your name
|
||||
description: your role description
|
||||
company: your company (optional)
|
||||
|
||||
# If the issue tracker for your role is not on github, uncomment the
|
||||
# next line and provide a value
|
||||
# issue_tracker_url: http://example.com/issue/tracker
|
||||
|
||||
# Choose a valid license ID from https://spdx.org - some suggested licenses:
|
||||
# - BSD-3-Clause (default)
|
||||
# - MIT
|
||||
# - GPL-2.0-or-later
|
||||
# - GPL-3.0-only
|
||||
# - Apache-2.0
|
||||
# - CC-BY-4.0
|
||||
license: license (GPL-2.0-or-later, MIT, etc)
|
||||
|
||||
min_ansible_version: 2.9
|
||||
|
||||
# If this a Container Enabled role, provide the minimum Ansible Container version.
|
||||
# min_ansible_container_version:
|
||||
|
||||
#
|
||||
# Provide a list of supported platforms, and for each platform a list of versions.
|
||||
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
|
||||
# To view available platforms and versions (or releases), visit:
|
||||
# https://galaxy.ansible.com/api/v1/platforms/
|
||||
#
|
||||
# platforms:
|
||||
# - name: Fedora
|
||||
# versions:
|
||||
# - all
|
||||
# - 25
|
||||
# - name: SomePlatform
|
||||
# versions:
|
||||
# - all
|
||||
# - 1.0
|
||||
# - 7
|
||||
# - 99.99
|
||||
|
||||
galaxy_tags: []
|
||||
# List tags for your role here, one per line. A tag is a keyword that describes
|
||||
# and categorizes the role. Users find roles by searching for tags. Be sure to
|
||||
# remove the '[]' above, if you add tags to this list.
|
||||
#
|
||||
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
|
||||
# Maximum 20 tags per role.
|
||||
|
||||
dependencies: []
|
||||
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
|
||||
# if you add dependencies to this list.
|
||||
|
13
roles/kubeconfigs/tasks/main.yml
Normal file
@ -0,0 +1,13 @@
---
# tasks file for kubeconfigs

- name: Distribute kubeadm config to be used with kubeadm init --config=
  template:
    src: cluster.kubeconfig.j2
    dest: /home/k8sadmin/cluster.kubeconfig
    owner: k8sadmin
    group: k8sadmin
  when: "'masters' in group_names"
  tags:
    - kubeconfig
18
roles/kubeconfigs/templates/cluster.kubeconfig.j2
Normal file
@ -0,0 +1,18 @@
apiVersion: kubeadm.k8s.io/v1beta2
cgroupDriver: systemd
kubernetesVersion: 1.20.4
kind: ClusterConfiguration
controlPlaneEndpoint: "192.168.50.117:6443"
apiServer:
  extraArgs:
    experimental-encryption-provider-config: /home/k8sadmin/encryption-config.yaml
    advertise-address: 0.0.0.0
etcd:
  external:
    endpoints:
      - https://192.168.50.240:2379
    caFile: /etc/etcd/ca.pem
    certFile: /etc/etcd/k8s-master.pem
    keyFile: /etc/etcd/k8smasterkey.pem
networking:
  podSubnet: 10.240.0.0/16
2
roles/kubeconfigs/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/kubeconfigs/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - kubeconfigs

149
roles/kubeconfigs/vars/main.yml
Normal file
@ -0,0 +1,149 @@
---
# vars file for kubeconfigs

# This should be the address of the load balancer that is reachable by clients
load_balancer_address: https://192.168.50.117

# IP addresses of worker nodes
worker01: 192.168.50.177
worker02: 192.168.50.202
worker03: 192.168.50.30

# Worker Hostnames for .kubeconfig templates
worker01_hostname: k8sworker01
worker02_hostname: k8sworker02
worker03_hostname: k8sworker03

# If you decide to encrypt the certs inside your .kubeconfig, this is an example of what
# each variable would look like.
ca_cert: !vault |
  $ANSIBLE_VAULT;1.1;AES256
|
||||
61623136326563393735656439373863343334653030323064396564386532323438653936323231
|
||||
3936333063643832653534386636383239396630366139650a363230326562383835336233316337
|
||||
63326435633536336132626236366334326230303363666639623938346238633862643731326466
|
||||
3738306163343231630a383233653365653466643465666237356331643465343765363631636464
|
||||
62363135333630333166633536333565336464346261333934393432623963313938306232643566
|
||||
66386130316230383133303264633034313433333961666237333937666661623935643530353862
|
||||
35633664636261333635653834326562303938623533613932646233336230303731626362643830
|
||||
64363037373965396565636437363238366536383261363131336164303236373933343763356365
|
||||
39623634326536366531633563636234623565356134353763303236626432356366636263313236
|
||||
31383339393038333164346461396566613133376665326162386539343464636637363064656332
|
||||
30383461363364336330313936363865343863386631666137666438313666386139663434316139
|
||||
33393965353162336566373733323639646632316232323730363662616365623564653835663431
|
||||
66313031333730323764353466326361623034303731626130303136623162313338373463626530
|
||||
39333231656533303061383232313363646237336235666536356663373134373334363032343834
|
||||
61306164666338383235373635616565396533663439366539353631656137616631383038653830
|
||||
35663132383434666532376230353664376138373838323762633033383761333364393935393137
|
||||
35626431303634336264303936303262393539633963343434616666646638383662653530313531
|
||||
31396136393938373262383264373333363463376663383061356237323339383933306432383930
|
||||
61306665353939653463636435613466343932326333623062363839306434303638643930336536
|
||||
33336333656363363834343239663565356438363135636162656566393736386461356263333962
|
||||
35303165666432623238353264333830613864303962353538303862306538643333326439393564
|
||||
37333130643135633634633630633337646239303535643439636665383832303632333861386536
|
||||
32346532303933646531353133666161343363356433376666326533343335393636326263353265
|
||||
31303934363135373733343665663734313535366533373461636264666164373436303064393130
|
||||
37613733663165393032646561393738333430636165326436323632393830396163666465306564
|
||||
[... remainder of encrypted hex payload (ansible-vault style ciphertext) omitted; the plaintext is not recoverable from the diff ...]
29
roles/nfs-server/.travis.yml
Normal file
@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
      - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
41
roles/nfs-server/README.md
Normal file
@ -0,0 +1,41 @@
NFS Server
=========

- This role creates an NFS server on the specified host and creates the intended directories to export.
- This role configures clients to mount the exported directory from the NFS server.

Requirements
------------

- Identify the names of the exports to be created and their options
- Identify the NFS server and clients and separate them into host groups (see the example inventory below)
- Access to a repository from which the NFS packages can be installed
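
The role's conditionals key on a group named "management" for the NFS server and treat every other host as a client. A minimal sketch of a matching inventory; the hostnames and the second group name are placeholders, only "management" is significant to this role:

    # placeholder hostnames; only the management group name matters here
    [management]
    master01

    [k8snodes]
    worker01
    worker02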

Role Variables
--------------

The exports are defined in vars/main.yml: export01 (default /mnt/k8sdata) is the directory the role creates on the server. Note that the exports.j2 template and the client /etc/fstab line currently hardcode the same path, so changing export01 means updating those as well. export02 and export03 are stubbed out as comments and can be enabled the same way.
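
For example, to export a second directory you could fill in one of the stubs in vars/main.yml; the path below is illustrative, and the new variable would also need to be added to the directory-creation loop and to exports.j2:

    ---
    # vars file for nfs-server
    export01: /mnt/k8sdata
    export02: /mnt/backups   # illustrative value, not part of the role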

Dependencies
------------

None; meta/main.yml declares no role dependencies.

Example Playbook
----------------

Including an example of how to use the role:

    - hosts: all
      roles:
         - nfs-server

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/nfs-server/defaults/main.yml
Normal file
@ -0,0 +1,2 @@
---
# defaults file for nfs-server
2
roles/nfs-server/handlers/main.yml
Normal file
@ -0,0 +1,2 @@
---
# handlers file for nfs-server
53
roles/nfs-server/meta/main.yml
Normal file
@ -0,0 +1,53 @@
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.9

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
75
roles/nfs-server/tasks/main.yml
Normal file
@ -0,0 +1,75 @@
---
# tasks file for nfs-server

# Note: This has been modified
# Install nfs-kernel-server on the server and nfs-common on clients
- name: Install nfs-kernel-server on nfs server only
  apt:
    pkg: nfs-kernel-server
    state: present
  when: "'management' in group_names"
  tags:
    - nfs

- name: Install nfs-common on nfs clients
  apt:
    pkg: nfs-common
    state: present
  when: "'management' not in group_names"
  tags:
    - nfs

# Create a directory to export k8sdata and set its permissions
- name: Create share directory on nfs-server
  file:
    path: "{{ item }}"
    state: directory
    owner: k8sadmin
    group: k8sadmin
    mode: '0770'
  loop:
    - "{{ export01 }}"
  when: "'management' in group_names"
  tags:
    - nfs

# Apply /etc/exports
- name: Create /etc/exports with the newly created directories we want to share
  template:
    src: exports.j2
    dest: /etc/exports
    owner: root
    group: root
    mode: '0644'
  when: "'management' in group_names"
  tags:
    - nfs

# Update the exports table
- name: Exportfs to update exports tables
  shell: /usr/sbin/exportfs -ra
  when: "'management' in group_names"
  tags:
    - nfs

# Update /etc/fstab on clients
- name: Add the mount point to /etc/fstab on clients
  lineinfile:
    path: /etc/fstab
    line: "192.168.50.113:/mnt/k8sdata /mnt nfs defaults 0 1"
    backup: yes
  when: "'management' not in group_names"
  tags:
    - nfs

# Mount everything in fstab
- name: Perform a mount -a
  shell: mount -a
  when: "'management' not in group_names"
  tags:
    - nfs
11
roles/nfs-server/templates/exports.j2
Normal file
@ -0,0 +1,11 @@
# /etc/exports: the access control list for filesystems which may be exported
#               to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#
/mnt/k8sdata *(rw,sync)
2
roles/nfs-server/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/nfs-server/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - nfs-server
5
roles/nfs-server/vars/main.yml
Normal file
@ -0,0 +1,5 @@
---
# vars file for nfs-server
export01: /mnt/k8sdata
#export02:
#export03:
29
roles/nginx_load_balancer/.travis.yml
Normal file
@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
      - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
53
roles/nginx_load_balancer/README.md
Normal file
@ -0,0 +1,53 @@
NGINX Load Balancer
=========
Documentation: https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/
-------------
Role to configure an nginx load balancer (see the example play below the list):

- Install, start, and enable nginx
- Create the tcpconf.d directory for extra configs
- Add an include for that directory to the main config
- Create the K8s master/control hosts load balancer config
- Reload nginx
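
A minimal play applying the role, mirroring tests/test.yml:

    - hosts: localhost
      remote_user: root
      roles:
        - nginx_load_balancer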

Manual Commands to match this playbook
-------------
These assume you are running with sudo.

Install nginx:
- apt-get install nginx

Start and enable nginx:
- systemctl start nginx
- systemctl enable nginx

Create /etc/nginx/tcpconf.d directory:
- mkdir /etc/nginx/tcpconf.d

Add the include statement to /etc/nginx/nginx.conf (note the trailing semicolon, which the playbook adds as well):
- echo "include /etc/nginx/tcpconf.d/*;" >> /etc/nginx/nginx.conf

Create /etc/nginx/tcpconf.d/kubernetes.conf:
- vi /etc/nginx/tcpconf.d/kubernetes.conf
- Paste in the contents of "kubernetes_conf.j2" from this role's "templates" directory
- Hit ESC and type :wq to write and quit the file

Reload nginx:
- nginx -s reload
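
Before and after the reload it can be worth sanity-checking things; these commands are suggestions, not part of the role:

    nginx -t               # validate the full configuration before reloading
    ss -ltn | grep 6443    # after the reload, confirm the stream listener is up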

Requirements
------------

- A sudo user on the hosts you wish to apply this to
- An internet connection, or the nginx package and its dependencies available locally

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/nginx_load_balancer/defaults/main.yml
Normal file
@ -0,0 +1,2 @@
---
# defaults file for nginx_load_balancer
2
roles/nginx_load_balancer/handlers/main.yml
Normal file
@ -0,0 +1,2 @@
---
# handlers file for nginx_load_balancer
53
roles/nginx_load_balancer/meta/main.yml
Normal file
@ -0,0 +1,53 @@
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.9

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
69
roles/nginx_load_balancer/tasks/main.yml
Normal file
@ -0,0 +1,69 @@
---
# tasks file for nginx_load_balancer

# (Recommended) If you have plans to upgrade this single control-plane kubeadm
# cluster to high availability, you should specify the --control-plane-endpoint
# to set the shared endpoint for all control-plane nodes.
# Such an endpoint can be either a DNS name or an IP address of a load balancer.

# Install nginx
- name: Install nginx
  apt:
    pkg: nginx
  tags:
    - nginx

# Start and enable nginx
- name: Start and Enable nginx
  service:
    name: nginx
    state: started
    enabled: yes
  tags:
    - nginx

# Create a directory for extra nginx configs. This allows for easy management of configs
- name: Create /etc/nginx/tcpconf.d directory
  file:
    path: /etc/nginx/tcpconf.d
    state: directory
  tags:
    - nginx

# Adding this line at the end of the file ensures nginx loads configs in the tcpconf.d directory on startup
- name: Add include statement to /etc/nginx/nginx.conf
  lineinfile:
    path: /etc/nginx/nginx.conf
    line: "include /etc/nginx/tcpconf.d/*;"
    state: present
    backup: yes
  tags:
    - nginx

# This config builds an upstream telling the nginx load balancer which servers to load balance
- name: Create /etc/nginx/tcpconf.d/kubernetes.conf
  template:
    src: kubernetes_conf.j2
    dest: /etc/nginx/tcpconf.d/kubernetes.conf
  tags:
    - nginx

- name: Reload nginx
  command: nginx -s reload
  tags:
    - nginx

# Add firewall ports for the apiserver and related traffic
- name: Add firewall ports
  firewalld:
    port: "{{ item.port }}"
    permanent: yes
    state: enabled
  with_items:
    - { port: 6443/tcp }
    - { port: 8472/tcp }
    - { port: 8080/tcp }
  tags:
    - firewalld
19
roles/nginx_load_balancer/templates/kubernetes_conf.j2
Normal file
@ -0,0 +1,19 @@
# Define an upstream using the master nodes as the upstream nodes
# "stream" defines a top-level stream block
# Define each server and the port that server listens on for traffic
# "upstream kubernetes" defines a server group
# "proxy_pass" refers to the "upstream <server group>", in this case "upstream kubernetes"
# I have commented out master02, as I do not have that in my cluster yet

stream {
    upstream kubernetes {
        server 192.168.50.240:6443;
        #server master02:6443;
    }

    server {
        listen 6443;
        listen 443;
        proxy_pass kubernetes;
    }
}
2
roles/nginx_load_balancer/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/nginx_load_balancer/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - nginx_load_balancer
2
roles/nginx_load_balancer/vars/main.yml
Normal file
@ -0,0 +1,2 @@
---
# vars file for nginx_load_balancer
29
roles/secrets/.travis.yml
Normal file
@ -0,0 +1,29 @@
---
language: python
python: "2.7"

# Use the new container infrastructure
sudo: false

# Install ansible
addons:
  apt:
    packages:
      - python-pip

install:
  # Install ansible
  - pip install ansible

  # Check ansible version
  - ansible --version

  # Create ansible.cfg with correct roles_path
  - printf '[defaults]\nroles_path=../' >ansible.cfg

script:
  # Basic role syntax check
  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
25
roles/secrets/README.md
Normal file
@ -0,0 +1,25 @@
Secrets
=========

Builds the encryption config to be used with the kubeadm cluster.

Requirements
------------

The directory you place the encryption config file in needs to be one that the kube-apiserver container mounts. The mounted paths can be viewed in the volumes section of /etc/kubernetes/manifests/kube-apiserver.yaml.
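
For reference, the config only takes effect once kube-apiserver is started with it; in a kubeadm static-pod manifest that is a command-line flag. The excerpt below is a sketch using this role's /etc/pki destination, with the matching volume mount omitted:

    # /etc/kubernetes/manifests/kube-apiserver.yaml (sketch, excerpt)
    spec:
      containers:
      - command:
        - kube-apiserver
        - --encryption-provider-config=/etc/pki/encryption-config.yaml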

Role Variables
--------------

No variables are set in defaults or vars. The role registers a variable named secret (a random 32-byte key, base64 encoded) on the first master and renders it into templates/encryption-config.yaml.j2 as key1.
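
A suggested spot check that encryption at rest is working, adapted from the Kubernetes encrypt-data guide linked in tasks/main.yml; the secret name is a placeholder, and etcdctl will also need your cluster's etcd client-certificate flags:

    kubectl create secret generic test-secret -n default --from-literal=k=v
    ETCDCTL_API=3 etcdctl get /registry/secrets/default/test-secret | hexdump -C | head
    # the stored value should start with k8s:enc:aescbc:v1: rather than plain text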

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
2
roles/secrets/defaults/main.yml
Normal file
@ -0,0 +1,2 @@
---
# defaults file for secrets
2
roles/secrets/handlers/main.yml
Normal file
@ -0,0 +1,2 @@
---
# handlers file for secrets
53
roles/secrets/meta/main.yml
Normal file
@ -0,0 +1,53 @@
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  license: license (GPL-2.0-or-later, MIT, etc)

  min_ansible_version: 2.9

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
62
roles/secrets/tasks/main.yml
Normal file
@ -0,0 +1,62 @@
---
# tasks file for secrets

# Role for creating the k8s cluster encryption key and config

# Documentation: https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/

# Generate a 32 byte random key and base64 encode it
- name: Generate a 32 byte random key and base64 encode it
  shell: head -c 32 /dev/urandom | base64
  register: secret
  when: inventory_hostname == groups['masters'][0]
  tags:
    - encryption_config

# Take the secret generated above and place it into a .j2 template
# to create the encryption config
- name: Place encryption config template onto master
  template:
    src: encryption-config.yaml.j2
    dest: /etc/pki/encryption-config.yaml
  when: inventory_hostname == groups['masters'][0]
  tags:
    - encryption_config

# Register the encryption config file for fetching
- name: Register encryption config for fetching from master01
  find:
    paths: /etc/pki
    recurse: no
    patterns: "encryption-config.yaml"
  register: files_to_copy
  tags:
    - encryption_config
  when: inventory_hostname == groups['masters'][0]

##########################################
## WARNING: ADD THIS ROLE'S /FILES      ##
## DIRECTORY TO YOUR .GITIGNORE         ##
## OR EVERYONE WILL HAVE YOUR           ##
## encryption-config                    ##
##########################################

# Bring the encryption config to the ansible controller
- name: Fetch Encryption Config
  fetch:
    src: "{{ item.path }}"
    dest: roles/secrets/files/
    flat: yes
  with_items: "{{ files_to_copy.files }}"
  tags:
    - encryption_config
  when: inventory_hostname == groups['masters'][0]

# This task is reserved for when you have 2 or more control nodes
#- name: Distribute encryption config to other control nodes ( masters )
#  copy:
#    src: "encryption-config.yaml"
#    dest: "/etc/pki"
#  when: inventory_hostname == groups['management'][0]
#  tags:
#    - encryption_config
11
roles/secrets/templates/encryption-config.yaml.j2
Normal file
@ -0,0 +1,11 @@
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: {{ secret.stdout }}
      - identity: {}
2
roles/secrets/tests/inventory
Normal file
@ -0,0 +1,2 @@
localhost

5
roles/secrets/tests/test.yml
Normal file
@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - secrets
2
roles/secrets/vars/main.yml
Normal file
@ -0,0 +1,2 @@
---
# vars file for secrets