Kubernetes Review
Set Default Namespace
kubectl config set-context --current --namespace=alpha
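
To confirm the active context now defaults to the new namespace:

kubectl config view --minify | grep namespace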
Create a POD
kubectl run nginx-pod --image=nginx:alpine --restart=Never --namespace=alpha
Create a Deployment
kubectl create deploy webapp --image=nginx:alpine --replicas=3
Scale
kubectl scale deploy webapp --replicas=1
History
kubectl rollout history deployment webapp
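
The history only gains revisions after an update. To create one and roll it back (the container is named nginx, since kubectl create deploy names the container after the image):

kubectl set image deploy/webapp nginx=nginx:1.25-alpine
kubectl rollout undo deploy/webapp --to-revision=1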
Create a Service
kubectl expose deploy/webapp --port=8080 --target-port=8080 --type=ClusterIP
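
Note that nginx:alpine listens on port 80, so adjust --target-port to whatever port the container actually serves, and confirm the Service picked up endpoints:

kubectl get svc webapp
kubectl get endpoints webapp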
Create a Role & RoleBinding
kubectl create role developer-role --resource=pods --verb=create,list,get,update,delete --namespace=delta
kubectl create rolebinding developer-role-binding --role=developer-role --user=john --namespace=delta
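
Verify the grant by impersonating the user:

kubectl auth can-i create pods --as=john --namespace=delta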
Create a ClusterRole & ClusterRoleBinding
kubectl create clusterrole dev-cluster-role --resource=deployments --verb=create,list,get,update,delete
kubectl create clusterrolebinding dev-cluster-role-binding --clusterrole=dev-cluster-role --user=john
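
Cluster-scoped permissions apply in every namespace; check them the same way:

kubectl auth can-i list deployments --as=john --all-namespaces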
ETCD
Backup
ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  snapshot save /opt/snapshot-pre-boot.db
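
Check the snapshot before relying on it (snapshot status reads the file directly, so no certificates are needed):

ETCDCTL_API=3 etcdctl snapshot status /opt/snapshot-pre-boot.db --write-out=table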
Restore
Restore ETCD
ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  --data-dir=/var/lib/etcd-new \
  snapshot restore /opt/snapshot-pre-boot.db
Change ETCD Config
vi /etc/kubernetes/manifests/etcd.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/etcd.advertise-client-urls: https://192.168.49.2:2379
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://192.168.49.2:2379
    - --cert-file=/var/lib/minikube/certs/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd-new # Change here and...
    - --initial-advertise-peer-urls=https://192.168.49.2:2380
    - --initial-cluster=minikube=https://192.168.49.2:2380
    - --key-file=/var/lib/minikube/certs/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://192.168.49.2:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://192.168.49.2:2380
    - --name=minikube
    - --peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/var/lib/minikube/certs/etcd/peer.key
    - --peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt
    - --proxy-refresh-interval=70000
    - --snapshot-count=10000
    - --trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.4.13-0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: etcd
    resources:
      requests:
        cpu: 100m
        ephemeral-storage: 100Mi
        memory: 100Mi
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /var/lib/etcd-new # Change here and...
      name: etcd-data
    - mountPath: /var/lib/minikube/certs/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-node-critical
  volumes:
  - hostPath:
      path: /var/lib/minikube/certs/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd-new # Change here!
      type: DirectoryOrCreate
    name: etcd-data
status: {}
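
kubelet watches the manifests directory, so saving the file recreates the static pod; confirm etcd comes back up:

kubectl -n kube-system get pods | grep etcd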
Create a Custom Scheduler
Copy the kube-scheduler static pod manifest and edit the copy:

cp -ai /etc/kubernetes/manifests/kube-scheduler.yaml /etc/kubernetes/manifests/my-scheduler.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: my-scheduler # Change here and...
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=false # Change here and...
    - --port=30291 # Change here and...
    - --scheduler-name=my-scheduler # Add here and...
    - --secure-port=0 # Add here and...
    image: k8s.gcr.io/kube-scheduler:v1.20.2
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
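
Once the my-scheduler pod is running, a workload opts in through spec.schedulerName; a minimal sketch (pod name and image assumed):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-custom
spec:
  schedulerName: my-scheduler
  containers:
  - name: nginx
    image: nginx:alpine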
CNI
Default CNI Binaries Location

ls /opt/cni/bin
Check PODs IP Range
kubectl -n kube-system logs weave-net-8lwzf -c weave | grep range
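
If the CNI pod name is not at hand, on a kubeadm cluster the pod CIDR can also be read from the controller manager's flags:

cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr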
Check Services IP Range
cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep range
Check Proxy Type
kubectl -n kube-system logs kube-proxy-jjwkz | grep proxy
Docker Logs
Check Docker Container Process

docker ps

Logs

docker logs <container-id>
POD Logs

kubectl logs <pod-name>

Check Linux Process
journalctl

journalctl -u kubelet

systemctl status

systemctl status kubelet
NetworkPolicy
Generate a YAML
kubectl run nginx-pod --image=nginx:alpine --restart=Never --namespace=alpha --dry-run=client -o yaml > nginx-pod.yaml
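
There is no imperative command for a NetworkPolicy, so the YAML has to be written by hand. A minimal sketch that allows ingress to the webapp pods on port 8080 only from pods labeled app=frontend (all names and labels assumed):

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: webapp-netpol
  namespace: alpha
spec:
  podSelector:
    matchLabels:
      app: webapp
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend
    ports:
    - protocol: TCP
      port: 8080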
Execute Command
With an Existing POD

kubectl exec nginx-pod -- nslookup webapp-service
Temporary POD

kubectl run test-pod --image=busybox --restart=Never --rm -it -- nc -z -v -w 2 webapp-service
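
The same pattern works for a quick HTTP check (the port is assumed to match the Service):

kubectl run test-pod --image=busybox --restart=Never --rm -it -- wget -qO- webapp-service:8080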