loading...

26/07/2023

k8s in AlmaLinux

 
https://www.linuxhowto.net/install-kubernetes-cluster-using-kubeadm-in-rhel-centos-almalinux-rocky-linux/
https://www.linuxtechi.com/how-to-install-kubernetes-cluster-rhel/
 
 
 
 
========= Install Kubernetes Cluster Using Kubeadm In RHEL, CentOS, AlmaLinux, Rocky Linux ==========
 
———- To proceed installation, we need below mentioned basic requirements.————–
 
Minimum 2 hosts.
2 CPUs.
2GB of Physical Memory (RAM).
20GB of Disk Space.
Internet connection to download packages.
 
 
 
 
=================== 1. Configure Hostname and IP address ==================== [Master & Worker]
 
Here we are going to have two hosts:
 
k8s-Master – Master
k8s-Worker – Worker
 
# hostnamectl set-hostname k8s-master
# hostnamectl set-hostname k8s-worker
 
# vi /etc/hosts
 
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1       localhost
 
192.168.199.198 k8s-Master
192.168.199.175 k8s-Worker-01
192.168.199.182 k8s-Worker-02
 
# sudo dnf install -y iproute-tc
# sudo yum -y install wget curl
 
 
 
 
=================== 2. Disable SElinux ==================================[ Master & Worker ]
 
# vi /etc/selinux/config
 
SELINUX=disabled

#reboot
 
// $ sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
 
# sestatus
 
 
 
 
=================3. Disable Swap in Master and Worker =======================[ Master & Worker ]
 
# swapoff -a && sed -i '/swap/d' /etc/fstab
 
# sudo swapoff -a
# sudo sed -i '/ swap / s/^/#/' /etc/fstab
 
 
 
 
================4. Allow the required ports in firewall ========================================
 
—> Master Node:
 
firewall-cmd --permanent --add-port=6443/tcp
firewall-cmd --permanent --add-port=2379-2380/tcp
firewall-cmd --permanent --add-port=7946/tcp
firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --permanent --add-port=10251/tcp
firewall-cmd --permanent --add-port=10259/tcp
firewall-cmd --permanent --add-port=10252/tcp
firewall-cmd --permanent --add-port=10255/tcp
firewall-cmd --permanent --add-port=10257/tcp
firewall-cmd --permanent --add-port=179/tcp
firewall-cmd --permanent --add-port=4789/udp
 
firewall-cmd --reload
 
—> Worker Node:
 
firewall-cmd --permanent --add-port=7946/tcp
firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --permanent --add-port=30000-32767/tcp
firewall-cmd --permanent --add-port=179/tcp
sudo firewall-cmd --permanent --add-port=4789/udp
 
firewall-cmd --reload
 
// # systemctl stop firewalld
// # systemctl disable firewalld
 
# init 6        [Must Restart]
 

=============== 5. Install Docker=========================================== [Master & Worker]

# sudo yum install -y yum-utils
# sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# sudo yum install -y docker-ce docker-ce-cli containerd.io --allowerasing

# sudo systemctl start docker
# sudo systemctl enable docker

# sudo vi /etc/docker/daemon.json

{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}

sudo systemctl daemon-reload
sudo systemctl restart docker

=========================== 6. Install Kubernetes =============================== [Master & Worker]

———-> Install CRI-O container runtime <—————–

# sudo vi /etc/modules-load.d/k8s.conf

overlay
br_netfilter

sudo modprobe overlay
sudo modprobe br_netfilter

$ sudo vi /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1

$ sudo sysctl --system

$ export VERSION=1.21

$ sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo

$ sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/CentOS_8/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo

$ sudo dnf install cri-o

sudo systemctl start crio
sudo systemctl enable crio

—————-> Install Kubernetes Packages <—————— [in Master Node & Worker Node]

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

# sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

sudo systemctl start kubelet
sudo systemctl enable kubelet

# swapoff -a

=========================== 7. Create a Kubernetes cluster =========================== [Master]

—————-> Create a Kubernetes cluster <—————–

$ sudo kubeadm init --cri-socket /var/run/crio/crio.sock --ignore-preflight-errors=all
// $ sudo kubeadm init --pod-network-cidr=192.168.0.0/16

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

[OPtional]
// $ kubectl taint nodes --all node-role.kubernetes.io/master-

————————–> Set permanent bash aliases <—————————-

# vi ~/.bashrc

if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi

# source ~/.bashrc

# vi ~/.bash_aliases

# alias kubectl='microk8s.kubectl'
alias k='kubectl'
alias kc='k config view --minify | grep name'
alias kdp='kubectl describe pod'
alias c='clear'
alias kd='kubectl describe pod'
alias ke='kubectl explain'
alias kf='kubectl create -f'
alias kg='kubectl get pods --show-labels'
alias kr='kubectl replace -f'
alias ks='kubectl get namespaces'
alias l='ls -lrt'
alias kga='k get pod --all-namespaces'
alias kgaa='kubectl get all --show-labels'
alias ll='ls -alF'
alias update='sudo -- sh -c "yum update && yum upgrade"'

# source ~/.bash_aliases

 
————————->  install Git <———————————-
# yum install git
 
————————-> Install Helm  <————————————
 
# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
$ chmod 700 get_helm.sh
$ ./get_helm.sh
 
 
 
 
 
 
=================== 8. Adding worker node to the cluster =================== [Worker]
 
N.B. Add –cri-socket /var/run/crio/crio.sock if multiple network present
 
# kubeadm join 192.168.199.198:6443 --token mv5c6q.ps59xjcoilkcjt9x \
        --discovery-token-ca-cert-hash sha256:b37096b6d23c2333c331116415a1db68af7e2b5b088115187707c0ca6b8fb414 --cri-socket /var/run/crio/crio.sock
 
 
 
 
—–> If you want to reset the worker node (reset certificates):
// # kubeadm reset -f --cri-socket /var/run/crio/crio.sock --ignore-preflight-errors=all

———–> Test in Master Node <—————

$ kubectl get nodes
# kubectl cluster-info
# kubectl config view
# kubectl config get-contexts

[Optional]

——> For New Token <———

$ kubeadm token list
# kubeadm token create --print-join-command

================ 9. Install Calico Pod Network Add-on ==================== [Master]

// # kubectl create namespace agones-system
// # kubectl apply --server-side -f https://raw.githubusercontent.com/googleforgames/agones/release-1.32.0/install/yaml/install.yaml

# curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml -O
# ls -l calico.yaml
# vi calico.yaml

N.B. Search for CALICO_IPV4POOL_IPIP and change its value from "Always" to "Never".
Then go down and uncomment the CALICO_IPV4POOL_CIDR block, setting it to your pod network CIDR:

- name: CALICO_IPV4POOL_CIDR
  value: "10.142.0.0/24"

# kubectl apply -f calico.yaml

# watch kubectl get no
# kubectl get pods -n kube-system

# wget https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz
# wget https://github.com/projectcalico/calico/releases/download/v3.24.5/tigera-operator-v3.24.5.tgz

# tar -zxvf helm-v3.10.3-linux-amd64.tar.gz
# chmod +x linux-amd64/helm
# mv linux-amd64/helm /usr/local/bin/
# helm install calico tigera-operator-v3.24.5.tgz -n kube-system --create-namespace

# kubectl get pod -n kube-system | grep tigera-operator
# kubectl get pod -n calico-system
# kubectl api-resources | grep calico

# curl -L -o calicoctl "https://github.com/projectcalico/calicoctl/releases/download/v3.21.5/calicoctl-linux-amd64"

# tar -zxvf helm-v3.10.3-linux-amd64.tar.gz
# chmod +x linux-amd64/helm

# mv linux-amd64/helm /usr/local/bin/
# wget https://github.com/projectcalico/calicoctl/releases/download/v3.21.5/calicoctl-linux-amd64
# mv calicoctl-linux-amd64 calicoctl
# chmod +x calicoctl
# mv calicoctl /usr/local/bin/
//mv calicoctl /usr/local/bin/

# calicoctl node status

# calicoctl get ippools default-ipv4-ippool -o yaml --allow-version-mismatch
# calicoctl ipam show --show-blocks --allow-version-mismatch
 
# calicoctl get networkpolicies --all-namespaces --allow-version-mismatch

# calicoctl version

# alias calicoctl="kubectl exec -i -n kube-system calicoctl -- /calicoctl"

//# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml
//# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml
//# watch kubectl get pods -n calico-system

//# kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/flannel-migration/calico.yaml

//# kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/flannel-migration/migration-job.yaml

//# kubectl get jobs -n kube-system flannel-migration
//N.B. wait for migration

//# kubectl delete -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/flannel-migration/migration-job.yaml

================ 10. Enable kubectl to manage Calico APIs =========================[Master]

# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/apiserver.yaml

# openssl req -x509 -nodes -newkey rsa:4096 -keyout apiserver.key -out apiserver.crt -days 365 -subj "/" -addext "subjectAltName = DNS:calico-api.calico-apiserver.svc"

# kubectl create secret -n calico-apiserver generic calico-apiserver-certs --from-file=apiserver.key --from-file=apiserver.crt

# kubectl patch apiservice v3.projectcalico.org -p \
"{\"spec\": {\"caBundle\": \"$(kubectl get secret -n calico-apiserver calico-apiserver-certs -o go-template='{{ index .data "apiserver.crt" }}')\"}}"

# kubectl api-resources | grep '\sprojectcalico.org'

$ watch kubectl get pods -n calico-system

To verify the master node’s availability in the cluster, run the command:

# kubectl get daemonset calico-node –namespace=kube-system
$ kubectl get nodes
$ kubectl get nodes -o wide

$ kubectl get pods --all-namespaces

============================================ End ================================

Posted in k8sTaggs: