Friday, February 12, 2021

Create a local Kubernetes cluster and deploy your code end to end

These are the commands accompanying my video tutorial on how to create a local Kubernetes cluster and deploy a simple stateless app from code, end to end. You can find the video tutorial here: https://youtu.be/MZr9Ls38uPw

#
# Install k3s
#

curl -sfL https://get.k3s.io | K3S_CLUSTER_INIT=1 INSTALL_K3S_EXEC="--disable=servicelb" sh -

cat /var/lib/rancher/k3s/server/node-token

# Run on other nodes to join the cluster
curl -sfL https://get.k3s.io | \
  INSTALL_K3S_EXEC=server \
  K3S_URL=https://192.168.50.203:6443 \
  K3S_TOKEN=... \
  sh -


kubectl get nodes --watch

journalctl --unit=k3s

kubectl get all --all-namespaces
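
# Optional: to run kubectl from your own machine instead of the node, copy the
# kubeconfig k3s writes to /etc/rancher/k3s/k3s.yaml (root-only by default) into
# ~/.kube/config there and point it at the server's IP (192.168.50.203 in this setup):
sudo cat /etc/rancher/k3s/k3s.yaml    # paste this into ~/.kube/config on your machine
sed -i 's/127.0.0.1/192.168.50.203/' ~/.kube/config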


#
# Install dashboard
#

GITHUB_URL=https://github.com/kubernetes/dashboard/releases
VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
echo $VERSION_KUBE_DASHBOARD
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml

cat>dashboard.admin-user.yml<<"EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
EOF

cat>dashboard.admin-user-role.yml<<"EOF"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml


kubectl get all --all-namespaces

# Connect to the node again, but with local port 8001 forwarded.
ssh -L 8001:127.0.0.1:8001 user1@ubuntutest1
kubectl proxy
# Open in browser:  http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
kubectl --namespace kubernetes-dashboard describe secret admin-user-token | grep ^token
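
# On newer Kubernetes versions (1.24+) the token secret is no longer created
# automatically for service accounts; if the describe above finds nothing, a
# login token can be requested directly instead:
kubectl --namespace kubernetes-dashboard create token admin-user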


#
# Install MetalLB
#

GITHUB_URL=https://github.com/metallb/metallb/releases
VERSION=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
echo $VERSION

kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/$VERSION/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/$VERSION/manifests/metallb.yaml
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"

cat>metallb-configmap.yml<<"EOF"
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.50.20-192.168.50.40
EOF
kubectl apply -f metallb-configmap.yml

# Check logs for errors
kubectl logs -lapp=metallb --namespace metallb-system --all-containers=true --prefix -f
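
# Optional smoke test: any LoadBalancer service should now get an address from the
# pool. nginx-test below is just a throwaway example deployment.
kubectl create deployment nginx-test --image=nginx
kubectl expose deployment nginx-test --type=LoadBalancer --port=80
kubectl get service nginx-test    # EXTERNAL-IP should be in 192.168.50.20-192.168.50.40
kubectl delete service,deployment nginx-test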



#
# Install docker registry
#

apt-get install docker-compose
docker run -d -p 5000:5000 --restart=always --name registry registry
docker ps -a
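
# Quick check that the registry answers over HTTP (the catalog is empty at this point):
curl http://ubuntutest1:5000/v2/_catalog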

# On each node:
mkdir -p /etc/rancher/k3s/
cat>/etc/rancher/k3s/registries.yaml<<"EOF"
mirrors:
  "ubuntutest1:5000":
    endpoint:
      - "http://ubuntutest1:5000"
EOF
systemctl restart k3s



#
# Create containerized app
#

mkdir -p /home/user1/dev/myapp1
cd /home/user1/dev/myapp1
cat>main.go<<"EOF"
package main

import (
  "github.com/gorilla/mux"
  "html/template"
  "log"
  "net/http"
  "os"
  "time"
)

var helloTemplate, _ = template.New("").Parse(`<!DOCTYPE html>
<html>
<head><title>testapp</title></head>
<body>
<h1>Test 1</h1>
<p>Now: {{.now.Format "2006-01-02 15:04:05" }}</p>
<p>Served from node: {{ .node }}</p>
<p>Served from pod: {{ .pod }}</p>
</body>
</html>
`)

type Msg struct {
  Ts  time.Time `json:"ts"`
}

func helloGet(w http.ResponseWriter, r *http.Request) {
  v := map[string]interface{}{
    "now":  time.Now(),
    "node": os.Getenv("NODE_NAME"),
    "pod": os.Getenv("POD_NAME"),
  }
  helloTemplate.Execute(w, v)
}

func main() {
  log.Println("Starting")

  router := mux.NewRouter()
  router.HandleFunc("/", helloGet).Methods("GET")
  handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    log.Println(r.Method + " " + r.URL.String())
    router.ServeHTTP(w, r)
  })

  log.Fatal(http.ListenAndServe(":8080", handler))
}
EOF


cat>go.mod<<"EOF"
module myapp1
EOF

touch go.sum

cat>Dockerfile<<"EOF"
FROM golang as builder
WORKDIR /app
COPY . ./
RUN go mod download
RUN CGO_ENABLED=0 GOOS=linux go build -v -o server

FROM alpine
RUN apk add --no-cache ca-certificates
COPY --from=builder /app/server /server
CMD ["/server"]
EOF


docker build -t "myapp1" .
docker run -it --rm -p 8080:8080 myapp1
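
# With the container running in the foreground, test it from a second terminal; it
# should return the "Test 1" page (node/pod come back empty outside Kubernetes):
curl http://localhost:8080/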
docker tag myapp1 ubuntutest1:5000/myapp1:v1
docker push ubuntutest1:5000/myapp1:v1

curl -X GET ubuntutest1:5000/v2/_catalog
curl -X GET ubuntutest1:5000/v2/myapp1/tags/list


#
# Create and deploy our app in k8s
#

mkdir -p /home/user1/dev/myapp1/k8s
cd /home/user1/dev/myapp1

cat>k8s/deployment.yml<<"EOF"
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp1-deployment
spec:
  selector:
    matchLabels:
      app: myapp1
  replicas: 3
  template:
    metadata:
      labels:
        app: myapp1
    spec:
      containers:
      - name: myapp1
        image: ubuntutest1:5000/myapp1:v1
        ports:
          - containerPort: 8080
        env:
          - name: NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: "kubernetes.io/hostname"
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: myapp1
EOF
kubectl apply -f k8s/deployment.yml
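
# Verify that the pods come up and get spread across the nodes:
kubectl get pods -l app=myapp1 -o wide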


cat>k8s/service.yml<<"EOF"
kind: Service
apiVersion: v1
metadata:
  name: myapp1-service
spec:
  type: ClusterIP
  selector:
    app: myapp1
  ports:
  - name: http-myapp1
    protocol: TCP
    port: 8080
EOF
kubectl apply -f k8s/service.yml
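
# The service is ClusterIP-only, so test it through a port-forward (run curl from a
# second terminal):
kubectl port-forward service/myapp1-service 8080:8080
curl http://localhost:8080/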


# Expose the app over HTTP via an Ingress (handled by k3s's bundled Traefik)
cat>k8s/ingress.yml<<"EOF"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp1-ingress
spec:
  rules:
  - http:
      paths:
        - path: /
          pathType: Prefix
          backend:
            service:
              name: myapp1-service
              port:
                number: 8080
EOF
kubectl apply -f k8s/ingress.yml
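
# With servicelb disabled, Traefik's LoadBalancer service should have picked up an
# address from the MetalLB pool; the ingress is reachable on that IP:
kubectl get ingress myapp1-ingress
kubectl get service --namespace kube-system traefik
# ...then open or curl that EXTERNAL-IP, e.g. if it was assigned 192.168.50.20:
curl http://192.168.50.20/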

kubectl logs -lapp=myapp1 --all-containers=true --prefix -f


#
# Upgrading to a new image. Either use the kubectl set image command below or update
# the image field in k8s/deployment.yml and re-apply it.
#


cd /home/user1/dev/myapp1
docker build -t "myapp1" .
docker tag myapp1 ubuntutest1:5000/myapp1:v2
docker push ubuntutest1:5000/myapp1:v2

kubectl set image deployments/myapp1-deployment myapp1=ubuntutest1:5000/myapp1:v2
kubectl rollout status deployments/myapp1-deployment

# Rolling back (running this repeatedly toggles between the two most recent revisions).
kubectl rollout undo deployments/myapp1-deployment
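
# Inspect the rollout history to see which revisions exist; a specific one can be
# restored with --to-revision:
kubectl rollout history deployments/myapp1-deployment
kubectl rollout undo deployments/myapp1-deployment --to-revision=1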



#
# Testing HA
#

kubectl get nodes ; kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName --all-namespaces

kubectl drain ubuntutest1 --ignore-daemonsets --delete-emptydir-data
kubectl uncordon ubuntutest1
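
# While the node is drained, watch where the myapp1 pods end up:
kubectl get pods -l app=myapp1 -o wide --watch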
