loading...

27/07/2023

Ubuntu DevOps One in All Part 01

 
https://computingforgeeks.com/install-kubernetes-cluster-ubuntu-jammy/
 
======================= Zafar’s Ubuntu Setup — DevOps =========================
 
1. — Update / Install
2. — Docker
3. — ansible
4. — Terraform
5. — Kubernetes – Master
6. — Kubernetes – Worker
7. — MetalLB Load Balancer
8. — Nginx Ingress Controller
9. — Prometheus and Grafana
10. — Jenkins
11. — Helm
 
1. =========================================   Update / Install ===================================
# sudo apt update
# sudo apt install wget curl git vim nano -y
 
sudo apt -y full-upgrade
[ -f /var/run/reboot-required ] && sudo reboot -f
 
 
2. =============================================== Docker ======================================
# apt install net-tools
 
Disable Sleep
# sudo systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Prerequisite !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Uninstall old versions
# sudo apt-get remove docker docker-engine docker.io containerd runc
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! License !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# sudo apt-get update
# apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
 
 
Add Docker’s official GPG key:
$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
$ echo \
"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
 
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Install Docker for Ubuntu !!!!!!!!!!!!!!!!!!!!!!!!
 
# sudo snap install docker
# sudo apt  install docker.io
//# sudo apt  install podman-docker
 
Default Location of Docker: /var/lib/docker/containers/
# du -csh *
# df -h
# docker rmi [image ID] [Remove Image]
 
# sudo systemctl enable docker
# sudo systemctl daemon-reload
# sudo systemctl restart docker
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Login to Dockerhub !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
# docker login -u zafims
 
Pass: < docker API Password >
 
// # docker pull zafims/cicddocker:latest
 
// # docker push zafims/cicddocker:tagname
 
3. =========================================== Install Ansible ====================================
 
sudo apt install software-properties-common
sudo add-apt-repository --yes --update ppa:ansible/ansible
sudo apt install ansible
 
ansible --version
 
4. =========================================== Install Terraform ====================================
 
# sudo apt install  software-properties-common gnupg2 curl
 
# curl https://apt.releases.hashicorp.com/gpg | gpg --dearmor > hashicorp.gpg
# sudo install -o root -g root -m 644 hashicorp.gpg /etc/apt/trusted.gpg.d/
 
# sudo apt-add-repository "deb [arch=$(dpkg --print-architecture)] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
 
# sudo apt install terraform
 
# terraform --version
 
5. =========================================== Install Kubernetes ===================================
 
############## Install kubelet, kubeadm and kubectl #############
 
sudo apt install curl apt-transport-https -y
curl -fsSL  https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/k8s.gpg
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
 
5.1 ############## install required packages. ####################
 
sudo apt update
sudo apt install wget curl vim git -y
 
snap install kubeadm --classic
snap install kubectl --classic
snap install kubelet --classic
 
5.2 ############## Confirm installation by checking the version of kubectl. ###########
 
kubectl version --client && kubeadm version
 
5.3 ############## Disable Swap Space #######################
 
$ sudo swapoff -a 
 
$ free -h
 
!!!!!!!!!!!!!!!!!! add # (hashtag) sign in front of the line. !!!!!!!!!!!!!!!
 
$ sudo vim /etc/fstab
 
// #/swap.img none swap sw 0 0
 
!!!!!!!!!!!!!!!!!!!!! Confirm setting is correct !!!!!!!!!!!!!!!!
 
$ sudo mount -a
$ free -h
 
!!!!!!!!!!!!!!!!!!!!! Enable kernel modules and configure sysctl. !!!!!!!!!!!
 
^^^^^^ Enable kernel modules  ^^^^^^^^
 
sudo modprobe overlay
sudo modprobe br_netfilter
 
^^^^^ Add some settings to sysctl ^^^^^^^^^
 
sudo tee /etc/sysctl.d/kubernetes.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
 
^^^^^ Reload sysctl  ^^^^^^^
 
# sudo sysctl --system
 
5.4 ########################### Install Container runtime – Containerd (Master and Worker nodes) ##############
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Configure persistent loading of modules !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo tee /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Load at runtime !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo modprobe overlay
sudo modprobe br_netfilter
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Ensure sysctl params are set !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo tee /etc/sysctl.d/kubernetes.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Reload configs  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo sysctl --system
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Install required packages !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Add Docker repo  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Install containerd  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo apt update
sudo apt install -y containerd.io
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Configure containerd and start service !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo su -
mkdir -p /etc/containerd
containerd config default>/etc/containerd/config.toml
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! restart containerd  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
sudo systemctl restart containerd
sudo systemctl enable containerd
systemctl status containerd
 
5.5 ####################### cgroup driver for kubelet ###########################
 
# vi /etc/containerd/config.toml
 
//  To use the systemd cgroup driver, set plugins.cri.systemd_cgroup = true
//ex:   [plugins."io.containerd.grpc.v1.cri"]
// systemd_cgroup = true
 
 
5.6 ############################# Initialize control plane (run on first master node) ################
 
sudo systemctl unmask docker
sudo systemctl start docker
 
$ lsmod | grep br_netfilter
$ sudo systemctl enable kubelet
 
$ sudo kubeadm config images pull
 
$ sudo kubeadm config images pull --cri-socket /run/containerd/containerd.sock
 
!!!!!!!!!!!!!!!!!!!!! Set cluster endpoint DNS name or add record to /etc/hosts file. !!!!!!!!!!!!!!
 
$ sudo vim /etc/hosts
 
172.29.20.5 k8s-cluster.zafims.com
 
!!!!!!!!!!!!!!!!!!!!!! Create cluster: !!!!!!!!!!!!!!!!!!!!!!!!!!
 
$ sudo kubeadm init --ignore-preflight-errors=all --pod-network-cidr=10.244.0.0/16
 
/* Optional
 
sudo kubeadm init \
  --pod-network-cidr=10.244.0.0/16 \
  --upload-certs \
  --control-plane-endpoint=k8sapi.zafims.com
 
End Optional*/
 
 
~~~~~~~~~~~~~~ You can optionally pass Socket file for runtime and advertise address depending on your setup. ~~~~
 
/* Optional
 
sudo kubeadm init \
  --pod-network-cidr=10.244.0.0/16 \
  --cri-socket /run/containerd/containerd.sock \
  --upload-certs \
  --control-plane-endpoint=k8s-cluster.zafims.com
 
End Optional */
 
!!!!!!!!!!!! Setup local kube-config in master node !!!!!!!!!!!!!
 
mkdir -p $HOME/.kube
sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
$ kubectl cluster-info
 
--> Find Token
# kubeadm token create --print-join-command
 
!!!!!!!!!!!!!!!!!! Additional Master nodes can be added using the command in installation output: !!!!!!
 
kubeadm join k8s-cluster.zafims.com:6443 --token sr4l2l.2kvot0pfalh5o4ik \
    --discovery-token-ca-cert-hash sha256:c692fb047e15883b575bd6710779dc2c5af8073f7cab460abd181fd3ddb29a18 \
    --control-plane
 
5.7 ####################### Install Kubernetes network plugin ###########################
 
# wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
 
$ vim kube-flannel.yml
 
net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
 
$ kubectl apply -f kube-flannel.yml
 
$ kubectl get pods -n kube-flannel
 
!!!!!!!!!!!! Confirm master node is ready: !!!!!!!!!!!!
 
$ kubectl get nodes -o wide
 
6. ########################### Add worker nodes ####################################
 
$ sudo vim /etc/hosts
 
172.29.20.5 k8s-cluster.zafims.com
 
6.1 ########################## Repeat step 1-2 #####################################
 
# sudo systemctl enable docker
# sudo systemctl daemon-reload
# sudo systemctl restart docker
 
–> Deploying a Pod Network
# sudo ufw allow 6443
# sudo ufw allow 6443/tcp
–> For looking config file
# kubectl -n kube-system get cm kubeadm-config -o yaml
 
--> Allow ports in firewall in master node
 
sudo iptables -A INPUT -p tcp --match multiport --dports 10250:10255 -j ACCEPT
sudo iptables -A INPUT -p tcp --dport 10248  -j ACCEPT
sudo iptables -A INPUT -p tcp --match multiport --dports 2379:2380 -j ACCEPT
sudo iptables -A INPUT -p tcp --match multiport --dports 10250:10251 -j ACCEPT
sudo iptables -A INPUT -p tcp --match multiport --dports 10250:10255 -j ACCEPT
sudo iptables -A INPUT -p udp --dport 8472 -j ACCEPT    # flannel VXLAN uses UDP 8472
sudo iptables -A INPUT -p tcp --dport 6443 -j ACCEPT
 
 
# kubectl get pods --all-namespaces (Command at Master) 
# kubectl get componentstatus  or # kubectl get cs (Command at Master)
 
--> Find Token
# kubeadm token create --print-join-command
 
–> Joining Worker Nodes to the Kubernetes Cluster <–
 
i.e.   # sudo kubeadm join 192.168.193.136:6443 --token cw4oaa.uy3le27zn358sofg \
        --discovery-token-ca-cert-hash sha256:e53f6dbdd0d34c9055a9749e5d23265f8cddfc7403455e907485c4fb9ec3cc7e
 
 
 # kubectl get nodes (Command at Master)
 
--> Allow ports in firewall in worker nodes
 
sudo iptables -A INPUT -p tcp --match multiport --dports 30000:32767 -j ACCEPT
sudo iptables -A INPUT -p tcp --dport 10250  -j ACCEPT
 
~~~~~~~~~~ Additional Command ~~~~~~~~~~~~
// kubeadm token list
// sudo kubeadm token create
 
 
~~~~~~~~~~~~~~~~~  If you want to schedule pods on the Kubernetes control-plane node, 
~~~~~~~~~~~~~~~~~  you need to remove a taint on the master nodes.
 
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
 
6.2 ####################  Kubernetes Dashboard with NodePort ####################################
 
$ kubectl version
 
If not Installed then Follow Optional Step
 
[Optional Start– Install Kubectl]
 
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
 
 
[Optional Stop]
 
# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml
 
$ kubectl get svc -n kubernetes-dashboard
 
$ kubectl --namespace kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec": {"type": "NodePort"}}'
 
$ kubectl get svc -n kubernetes-dashboard kubernetes-dashboard -o yaml
 
$ vim nodeport_dashboard_patch.yaml
 
spec:
  ports:
  - nodePort: 32000
    port: 443
    protocol: TCP
    targetPort: 8443
 
$ kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard --patch "$(cat nodeport_dashboard_patch.yaml)"
 
$ kubectl get deployments -n kubernetes-dashboard 
 
$ kubectl get pods -n kubernetes-dashboard
 
$ kubectl get service -n kubernetes-dashboard 
 
~~~~~~~~~~ Create Admin User to Access Kubernetes Dashboard ~~~~~~~~~
 
$ vim admin-sa.yml
 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: zafims-admin
  namespace: kube-system
 
$ kubectl apply -f admin-sa.yml
 
$ vim admin-rbac.yml
 
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: zafims-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: zafims-admin
    namespace: kube-system
 
$ kubectl apply -f admin-rbac.yml
 
$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep zafims-admin | awk '{print $1}')
 
–> Note down Token Number
 
~~~~~~~ Accessing Kubernetes Dashboard
 
$ kubectl get services -n <namespace> | grep dashboard
 
# Example
https://192.168.200.14:32000
or
https://192.168.200.14:32254
 
7. ==============================================  MetalLB Load Balancer  ==============================
 
~~~~~~ Traffic on port 7946 must be allowed
 
!!!!!!!!!!!!!!!!!!!!!!  Deploy MetalLB Load Balancer  !!!!!!!!!!!!!!!!
 
$ kubectl cluster-info
 
$ MetalLB_RTAG=$(curl -s https://api.github.com/repos/metallb/metallb/releases/latest | grep tag_name | cut -d '"' -f 4 | sed 's/v//')
 
$ echo $MetalLB_RTAG
 
$ mkdir ~/metallb
$ cd ~/metallb
 
$ wget https://raw.githubusercontent.com/metallb/metallb/v$MetalLB_RTAG/config/manifests/metallb-native.yaml
 
!!!!!!!!!!!!!! Install MetalLB Load Balancer !!!!!!!!!!!!!!!!!!
 
$ kubectl apply -f metallb-native.yaml
 
watch kubectl get all -n metallb-system
kubectl get pods -n metallb-system --watch
 
$ kubectl get pods  -n metallb-system
 
$ kubectl get all  -n metallb-system
 
!!!!!!!!!!!!!!!!!!!!!!!!  Create Load Balancer services Pool of IP Addresses  !!!!!!!!!!!!!!!!!!
 
The pool has IPs range 192.168.1.30   –   192.168.1.50
 
$ vim ~/metallb/ipaddress_pools.yaml
 
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: production
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.30-192.168.1.50
 
~~~~~~~ You can define multiple instances of IPAddressPool in a single definition. See example below:
 
spec:
  addresses:
  - 192.168.1.0/24
  - 172.20.20.30-172.20.20.50
  - fc00:f853:0ccd:e799::/124
 
 
!!!!!!!!!!!!!!!!!!!!!!  Announce service IPs after creation  !!!!!!!!!!!!!!!!!
 
$ vim ~/metallb/ipaddress_pools.yaml
 
 
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: production
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.30-192.168.1.50
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-advert
  namespace: metallb-system
 
 
$ kubectl apply -f  ~/metallb/ipaddress_pools.yaml
 
$ kubectl get ipaddresspools.metallb.io -n metallb-system
 
$ kubectl describe ipaddresspools.metallb.io production -n metallb-system
 
~~~~~~~~~~~~  Deploying services that use MetalLB LoadBalancer  ~~~~~~~~~~~~~~~~~~~~~~~~
 
$ vim web-app-demo.yaml
 
apiVersion: v1
kind: Namespace
metadata:
  name: web
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-server
  namespace: web
spec:
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: httpd
        image: httpd:alpine
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: web-server-service
  namespace: web
spec:
  selector:
    app: web
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: LoadBalancer
 
 
$ kubectl apply -f web-app-demo.yaml
 
$ kubectl get svc -n web
 
$ telnet 192.168.1.30 80
 
~~~~~~~~~~~~~~~  We can also access the service from a web console via http://192.168.1.30
 
!!!!!!!!!!!!!!!!!!!!!!!!  Requesting specific IP Address for a service (Choose One) !!!!!!!!!!!!!!!!!
 
$ kubectl delete -f web-app-demo.yaml
 
$ vim web-app-demo.yaml
 
apiVersion: v1
kind: Namespace
metadata:
  name: web
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-server
  namespace: web
spec:
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: httpd
        image: httpd:alpine
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: web-server-service
  namespace: web
spec:
  selector:
    app: web
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: LoadBalancer
  loadBalancerIP: 192.168.1.35
 
 
$ kubectl apply -f web-app-demo.yaml
 
Checking IP address assigned:
$ kubectl get svc -n web
 
Testing the service:
$ curl http://192.168.1.35
 
!!!!!!!!!!!!!!!!!!!!!!!!!!  Choosing specific IP Address pool (Choose One) !!!!!!!!!!!!!!!!!!!!!!!!
 
$ kubectl delete -f web-app-demo.yaml
 
$ vim web-app-demo.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: web-server-service
  namespace: web
  annotations:
    metallb.universe.tf/address-pool: production
spec:
  selector:
    app: web
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: LoadBalancer
 
 
$ kubectl apply -f web-app-demo.yaml
 
Checking IP address assigned:
$ kubectl get svc -n web
 
Testing the service:
$ curl http://192.168.1.35
 
 
!!!!!!!!!!!!!!!!!!!!  Controlling automatic address allocation (Choose One) !!!!!!!!!!!!!!!!!
 
$ kubectl delete -f web-app-demo.yaml
 
$ vim web-app-demo.yaml
 
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: expensive
  namespace: metallb-system
spec:
  addresses:
  - 42.175.26.64/30
  autoAssign: false
 
 
 
$ kubectl apply -f web-app-demo.yaml
 
Checking IP address assigned:
$ kubectl get svc -n web
 
Testing the service:
$ curl http://192.168.1.35
 
!!!!!!!!!!!!!!!!!!!!!!!!!!  IP Address Sharing  (Choose One) !!!!!!!!!!!!!!!!!!!!!!!!!!!
 
$ kubectl delete -f web-app-demo.yaml
 
$ vim web-app-demo.yaml
 
 
apiVersion: v1
kind: Service
metadata:
  name: dns-service-tcp
  namespace: demo
  annotations:
    metallb.universe.tf/allow-shared-ip: "key-to-share-192.168.1.36"
spec:
  type: LoadBalancer
  loadBalancerIP: 192.168.1.36
  ports:
    - name: dnstcp
      protocol: TCP
      port: 53
      targetPort: 53
  selector:
    app: dns
---
apiVersion: v1
kind: Service
metadata:
  name: dns-service-udp
  namespace: demo
  annotations:
    metallb.universe.tf/allow-shared-ip: "key-to-share-192.168.1.36"
spec:
  type: LoadBalancer
  loadBalancerIP: 192.168.1.36
  ports:
    - name: dnsudp
      protocol: UDP
      port: 53
      targetPort: 53
  selector:
    app: dns
 
 
$ kubectl apply -f web-app-demo.yaml
 
Checking IP address assigned:
$ kubectl get svc -n web
 
Testing the service:
$ curl http://192.168.1.35
 
8. ================================ Nginx Ingress Controller  ==========================
 
# controller_tag=$(curl -s https://api.github.com/repos/kubernetes/ingress-nginx/releases/latest | grep tag_name | cut -d '"' -f 4)
# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/${controller_tag}/deploy/static/provider/baremetal/deploy.yaml
 
# mv deploy.yaml nginx-ingress-controller-deploy.yaml
 
~~~~~~~~~~~~ Feel free to check the file contents and modify where you see fit:
# vim nginx-ingress-controller-deploy.yaml
 
$ kubectl apply -f nginx-ingress-controller-deploy.yaml
 
$ kubectl config set-context --current --namespace=ingress-nginx
 
$ kubectl get pods -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx --watch
 
~~~~~~ Once the ingress controller pods are running, you can cancel the command typing Ctrl+C.
 
!!!!!!!!!! run multiple Nginx Ingress Pods
$ kubectl -n ingress-nginx scale deployment ingress-nginx-controller --replicas 2
$ kubectl get pods  -n ingress-nginx
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Configure Nginx Ingress Controller on Kubernetes !!!!!!!!!!!!!!!!!!!!!!!
 
$ kubectl get svc -n ingress-nginx
 
$ kubectl get service ingress-nginx-controller –namespace=ingress-nginx
 
$ kubectl -n ingress-nginx patch svc ingress-nginx-controller --type='json' -p '[{"op":"replace","path":"/spec/type","value":"LoadBalancer"}]'
 
// service/ingress-nginx-controller patched
 
$ kubectl get service ingress-nginx-controller –namespace=ingress-nginx
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!  Deploy Services to test Nginx Ingress functionality  !!!!!!!!!!!!!!!!!!!!
 
# kubectl create namespace demo
 
# cd ~/
# vim demo-app.yml
 
kind: Pod
apiVersion: v1
metadata:
  name: apple-app
  labels:
    app: apple
spec:
  containers:
    - name: apple-app
      image: hashicorp/http-echo
      args:
        - "-text=apple"
---
kind: Service
apiVersion: v1
metadata:
  name: apple-service
spec:
  selector:
    app: apple
  ports:
    - port: 5678 # Default port for image
---
kind: Pod
apiVersion: v1
metadata:
  name: banana-app
  labels:
    app: banana
spec:
  containers:
    - name: banana-app
      image: hashicorp/http-echo
      args:
        - "-text=banana"
---
kind: Service
apiVersion: v1
metadata:
  name: banana-service
spec:
  selector:
    app: banana
  ports:
    - port: 5678 # Default port for image
 
 
$ kubectl apply -f demo-app.yml -n demo
 
$ kubectl get pods -n demo
 
$ kubectl -n demo logs apple-app
 
 
cat <<EOF | kubectl -n demo apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: ubuntu
  labels:
    app: ubuntu
spec:
  containers:
  - name: ubuntu
    image: ubuntu:latest
    command: ["/bin/sleep", "3650d"]
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
 
 
$ kubectl -n demo exec -ti ubuntu -- bash
 
# curl apple-service:5678
# curl banana-service:5678
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!  Creating an ingress route  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
$ kubectl explain ingress
 
$ vim webapp-app-ingress.yml
 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
spec:
  ingressClassName: nginx
  rules:
  - host: webapp.k8s.example.com
    http:
      paths:
        - path: /
          pathType: Prefix
          backend:
            service:
              name: web-server-service
              port:
                number: 80
 
 
$ kubectl -n web apply -f  webapp-app-ingress.yml
 
$ kubectl get ingress -n web
 
$ kubectl get pods -n ingress-nginx
 
$ kubectl exec -n ingress-nginx  -it ingress-nginx-controller-6f5844d579-hwrqn -- /bin/bash
 
 
bash-5.1$ less /etc/nginx/nginx.conf
 
 
$ curl http://webapp.k8s.example.com/
 
9. ==============================================  Prometheus and Grafana  =============================
 
$ git clone https://github.com/prometheus-operator/kube-prometheus.git
 
$ cd kube-prometheus
 
~~~~~~~~~~~  Create monitoring namespace, CustomResourceDefinitions & operator pod
 
$ kubectl create -f manifests/setup
 
$ kubectl get ns monitoring
 
~~~~~~~~~~~  Deploy Prometheus Monitoring Stack
 
$ kubectl create -f manifests/
 
$ kubectl get pods -n monitoring -w
 
$ kubectl get svc -n monitoring
 
~~~~~~~~~~~~  Access Prometheus, Grafana, and Alertmanager dashboards
 
Grafana Dashboard
# kubectl --namespace monitoring port-forward svc/grafana 3000
 
URL:  http://localhost:3000
 
Default Logins are:
Username: admin
Password: admin
 
Prometheus Dashboard
# kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
URL: http://localhost:9090
 
Alert Manager Dashboard
kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
URL : http://localhost:9093
 
 
10. ===========================================  Install Jenkins  ====================================
 
$ kubectl create namespace jenkins
$ vi jenkins.yaml
 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      containers:
      - name: jenkins
        image: jenkins/jenkins:lts
        ports:
          - name: http-port
            containerPort: 8080
          - name: jnlp-port
            containerPort: 50000
        volumeMounts:
          - name: jenkins-vol
            mountPath: /var/jenkins_vol
      volumes:
        - name: jenkins-vol
          emptyDir: {}
 
 
$ kubectl create -f jenkins.yaml --namespace jenkins
 
$ kubectl get pods -n jenkins
 
$ vi jenkins-service.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: jenkins
spec:
  type: NodePort
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 30000
  selector:
    app: jenkins
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins-jnlp
spec:
  type: ClusterIP
  ports:
    - port: 50000
      targetPort: 50000
  selector:
    app: jenkins
 
 
$ kubectl create -f jenkins-service.yaml --namespace jenkins
 
$ kubectl get services --namespace jenkins
 
 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Accessing the Jenkins UI !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
$ kubectl get nodes -o wide
 
$ kubectl get pods -n jenkins [Take the name from output jenkins-6fb994cfc5-twnvn]
 
$ kubectl logs jenkins-6fb994cfc5-twnvn -n jenkins
 
 
N.B. You might need to scroll up or down to find the password:
User: admin
 
 
11. =========================================== Install Helm ====================================
https://computingforgeeks.com/install-and-use-helm-3-on-kubernetes-cluster/
 
# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
 
# chmod 700 get_helm.sh
 
# sudo ./get_helm.sh
 
$ helm version
 
11.2 !!!!!!!!!!!!!!!!!! Add Helm Chart repository !!!!!!!!!!!!!
 
$ helm repo add stable https://charts.helm.sh/stable
 
$ helm search repo stable
 
11.3 !!!!!!!!!!!!!!!!!!!!!!!! Install Applications on Helm Chart !!!!!!!!!!!!!
 
$ kubectl config get-contexts
$ kubectl config use-context k3s
 
$ helm repo update
 
$ helm show chart stable/nginx-ingress
 
$ helm install nginx-ingress stable/nginx-ingress
 
$ helm ls
 
To Uninstall
$ helm uninstall nginx-ingress
 
===========================================  The End =============================================
Posted in DevOps. Tags: