#!/bin/bash

echo '.'
echo ' /$$$$$$$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$$'
echo '|__ $$__/|_ $$_/| $$__ $$ /$$__ $$ /$$__ $$| $$_____/ | $$$ /$$$|_ $$_/| $$$ | $$| $$_____/'
echo ' | $$ | $$ | $$ \ $$| $$ \__/ | $$ \ $$| $$ | $$$$ /$$$$ | $$ | $$$$| $$| $$ '
echo ' | $$ | $$ | $$$$$$$/| $$$$$$ /$$$$$$| $$ | $$| $$$$$ /$$$$$$| $$ $$/$$ $$ | $$ | $$ $$ $$| $$$$$ '
echo ' | $$ | $$ | $$____/ \____ $$|______/| $$ | $$| $$__/|______/| $$ $$$| $$ | $$ | $$ $$$$| $$__/ '
echo ' | $$ | $$ | $$ /$$ \ $$ | $$ | $$| $$ | $$\ $ | $$ | $$ | $$\ $$$| $$ '
echo ' | $$ /$$$$$$| $$ | $$$$$$/ | $$$$$$/| $$ | $$ \/ | $$ /$$$$$$| $$ \ $$| $$$$$$$$'
echo ' |__/ |______/|__/ \______/ \______/ |__/ |__/ |__/|______/|__/ \__/|________/'
echo '.'
echo -e " \033[36;5m ___ _ ___ \033[0m"
echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m"
echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V V / _\` | '_| ' \ \033[0m"
echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m"
echo -e " \033[36;5m \033[0m"

#############################################
# YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
#############################################

# Set the IP addresses of the admin, managers, and workers nodes
admin=10.0.3.5
manager1=10.0.3.21
manager2=10.0.3.22
manager3=10.0.3.23
worker1=10.0.3.24
worker2=10.0.3.25

# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM)
workerHostname1=SLDOKSWAP01
workerHostname2=SLDOKSWAP02

# User of remote machines
user=pde

# Interface used on remotes
interface=eth0

# Array of all manager nodes
allmanagers=($manager1 $manager2 $manager3)

# Array of extra managers
managers=($manager2 $manager3)

# Array of worker nodes
workers=($worker1 $worker2)

# Array of all nodes
all=($manager1 $manager2 $manager3 $worker1 $worker2)

# SSH certificate name variable
certName=id_rsa

#############################################
#             DO NOT EDIT BELOW             #
#############################################

# For testing purposes - in case time is wrong due to VM snapshots
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on

# Move SSH certs to ~/.ssh and change permissions
cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
chmod 600 /home/$user/.ssh/$certName
chmod 644 /home/$user/.ssh/$certName.pub

# Create SSH config file to skip host key checking (don't use in production!)
echo "StrictHostKeyChecking no" > ~/.ssh/config #add ssh keys for all nodes for node in "${all[@]}"; do ssh-copy-id $user@$node done # Copy SSH keys to MN1 to copy tokens back later scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh # Install dependencies for each node (Docker, GlusterFS) for newnode in "${all[@]}"; do ssh $user@$newnode -i ~/.ssh/$certName sudo su < /dev/null apt-get update NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y systemctl start glusterd systemctl enable glusterd mkdir -p /gluster/volume1 exit EOF echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m" done # Step 1: Create Swarm on first node ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < manager.txt docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt echo "StrictHostKeyChecking no" > ~/.ssh/config ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker exit EOF echo -e " \033[32;5mManager1 Completed\033[0m" # Step 2: Set variables managerToken=`cat manager` workerToken=`cat worker` # Step 3: Connect additional managers for newnode in "${managers[@]}"; do ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/fstab mount.glusterfs localhost:/staging-gfs /mnt chown -R root:docker /mnt exit EOF echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m" done # OPTIONAL # # Step 7: Add Portainer ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <