Assets: objects
- Workload: Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet, Job, CronJob, ...
- Service discovery and load balancing: Service and Ingress
- Configuration and storage: Volume, CSI
- ConfigMap,Secret
- DownwardAPI
- Cluster level resources
- Namespace, Node, Role, ClusterRole, RoleBinding, ClusterRoleBinding
- Metadata resource
- HPA,PodTemplate,LimitRange
#Get apiversion supported by apiServer [root@master ~]# kubectl api-versions admissionregistration.k8s.io/v1 apiextensions.k8s.io/v1 apiregistration.k8s.io/v1 apps/v1 authentication.k8s.io/v1 authorization.k8s.io/v1 autoscaling/v1 autoscaling/v2beta1 autoscaling/v2beta2 batch/v1 batch/v1beta1 certificates.k8s.io/v1 coordination.k8s.io/v1 discovery.k8s.io/v1 discovery.k8s.io/v1beta1 events.k8s.io/v1 events.k8s.io/v1beta1 flowcontrol.apiserver.k8s.io/v1beta1 networking.k8s.io/v1 node.k8s.io/v1 node.k8s.io/v1beta1 policy/v1 policy/v1beta1 rbac.authorization.k8s.io/v1 scheduling.k8s.io/v1 storage.k8s.io/v1 storage.k8s.io/v1beta1 v1 [root@master ~]# kubectl get pods NAME READY STATUS RESTARTS AGE client 0/1 Error 0 23h client-7c75c79d7c-h9994 0/1 CrashLoopBackOff 19 (2m14s ago) 23h nginx 1/1 Running 2 (9m13s ago) 37h nginx-deploy-7c948bcff4-jclqg 1/1 Running 1 (9m11s ago) 24h nginx-deploy-7c948bcff4-mczxp 1/1 Running 1 (9m13s ago) 24h #Output in yaml format [root@master ~]# kubectl get pods nginx -o yaml #Version number, group / version number. 
The default group is the core group apiVersion: v1 #kind indicates what resources are used kind: Pod #metadata metadata: creationTimestamp: "2021-11-07T00:16:21Z" labels: run: nginx name: nginx namespace: default resourceVersion: "17833" uid: 8f1cd26f-b981-428f-a156-a92b7dee2cfd #characteristic spec: containers: #Image used by container - image: nginx:1.14 imagePullPolicy: IfNotPresent #Container name name: nginx ports: - containerPort: 80 protocol: TCP resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: kube-api-access-85lz5 readOnly: true dnsPolicy: ClusterFirst enableServiceLinks: true nodeName: node2 preemptionPolicy: PreemptLowerPriority priority: 0 restartPolicy: Always schedulerName: default-scheduler securityContext: {} serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 tolerations: - effect: NoExecute key: node.kubernetes.io/not-ready operator: Exists tolerationSeconds: 300 - effect: NoExecute key: node.kubernetes.io/unreachable operator: Exists tolerationSeconds: 300 volumes: - name: kube-api-access-85lz5 projected: defaultMode: 420 sources: - serviceAccountToken: expirationSeconds: 3607 path: token - configMap: items: - key: ca.crt path: ca.crt name: kube-root-ca.crt - downwardAPI: items: - fieldRef: apiVersion: v1 fieldPath: metadata.namespace path: namespace #The current state of the current resource status: conditions: - lastProbeTime: null lastTransitionTime: "2021-11-07T00:16:21Z" status: "True" type: Initialized - lastProbeTime: null lastTransitionTime: "2021-11-08T14:01:53Z" status: "True" type: Ready - lastProbeTime: null lastTransitionTime: "2021-11-08T14:01:53Z" status: "True" type: ContainersReady - lastProbeTime: null lastTransitionTime: "2021-11-07T00:16:21Z" status: "True" type: PodScheduled containerStatuses: - containerID: 
docker://ef084460d6dc3183abd768dc89dbbbb0d9a0fab5809220d02f97e9230819ddff image: nginx:1.14 imageID: docker-pullable://nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d lastState: terminated: containerID: docker://05be395a286ac086568ec8a80a8ce017c8165a46e0e27e2f7be1e302e1c5c2d2 exitCode: 255 finishedAt: "2021-11-08T14:01:16Z" reason: Error startedAt: "2021-11-07T13:18:48Z" name: nginx ready: true restartCount: 2 started: true state: running: startedAt: "2021-11-08T14:01:52Z" hostIP: 192.168.88.103 phase: Running podIP: 10.244.1.6 podIPs: - ip: 10.244.1.6 qosClass: BestEffort startTime: "2021-11-07T00:16:21Z" #The kubectl proxy command enables the API server to listen on the local port 8001 [root@master ~]# kubectl proxy Starting to serve on 127.0.0.1:8001 #Accessing local resources through curl will return json format configuration information [root@master ~]# curl http://localhost:8001/api/v1/namespaces/default/pods/myapp-7b595df7fc-9d92c
How to create a resource:
- apiServer only receives resource definitions in JSON format
- The manifest is written in YAML format; the apiserver automatically converts it to JSON format before it is submitted
Configuration list of resources
- apiVersion[group/version]
- kind: resource category
- Metadata: metadata
- name: must be unique
- Namespace: namespace
- Labels: labels
- annotation: annotation
- Reference PATH for each resource
- /api/GROUP/version/namespaces/namespace/type/name
- spec: desired state
- status: current state this field is maintained by the kubernetes cluster
#Use explain to see how pods resources are defined [root@master ~]# kubectl explain pods KIND: Pod VERSION: v1 DESCRIPTION: Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts. FIELDS: ... ... metadata <Object> Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata ... ... #View resource secondary definitions [root@master ~]# kubectl explain pods.metadata KIND: Pod VERSION: v1 RESOURCE: metadata <Object> DESCRIPTION: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. FIELDS: annotations <map[string]string> Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations
Creating a pod through yaml
#yaml file apiVersion: v1 kind: Pod metadata: name: pod-demo namespace: default labels: app: myapp tier: frontend spec: containers: - name: nginx-new image: nginx:1.14 - name: busybox image: busybox:latest command: - "/bin/sh" - "-c" - "sleep 3600" #Creating a pod through yaml [root@master manifests]# kubectl create -f nginx.yaml pod/pod-demo created [root@master manifests]# kubectl get pods NAME READY STATUS RESTARTS AGE client 0/1 Error 0 24h client-7c75c79d7c-h9994 0/1 Completed 32 (5m25s ago) 24h nginx 1/1 Running 2 (73m ago) 38h nginx-deploy-7c948bcff4-jclqg 1/1 Running 1 (73m ago) 25h nginx-deploy-7c948bcff4-mczxp 1/1 Running 1 (73m ago) 25h pod-demo 2/2 Running 0 8s #The first method to delete a pod [root@master manifests]# kubectl delete -f nginx.yaml pod "pod-demo" deleted #The second method of deleting pod [root@master manifests]# kubectl delete pod-demo #view log [root@master manifests]# kubectl logs pod-demo busybox /bin/sh: can't create /usr/share/nginx/html/index.html: nonexistent directory #-l or -- selector tag filtering to obtain the resource list of the corresponding tag [root@master ~]# kubectl get pods --selector app=nginx-deploy NAME READY STATUS RESTARTS AGE nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (46m ago) 2d nginx-deploy-7c948bcff4-mczxp 1/1 Running 2 (46m ago) 2d #-L displays the values of multiple labels [root@master ~]# kubectl get pods -L app,run NAME READY STATUS RESTARTS AGE APP RUN client 0/1 Error 0 47h client client-7c75c79d7c-h9994 0/1 CrashLoopBackOff 47 (20s ago) 47h client nginx 1/1 Running 3 (48m ago) 2d14h nginx nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (48m ago) 2d nginx-deploy nginx-deploy-7c948bcff4-mczxp 1/1 Running 2 (48m ago) 2d nginx-deploy #kubectl label labels the specified resource [root@master ~]# kubectl label pods nginx release=tye pod/nginx labeled # View label settings [root@master ~]# kubectl get pods -l release --show-labels NAME READY STATUS RESTARTS AGE LABELS nginx 1/1 Running 3 (52m ago) 2d14h 
release=tye,run=nginx #If the value of the label already exists when setting the label, you need to use the -- overwrite parameter, otherwise it cannot be modified [root@master ~]# kubectl label pods nginx release=edison error: 'release' already has a value (tye), and --overwrite is false [root@master ~]# kubectl label pods nginx release=edison --overwrite pod/nginx labeled [root@master ~]# kubectl get pods -l release --show-labels NAME READY STATUS RESTARTS AGE LABELS nginx 1/1 Running 3 (54m ago) 2d14h release=edison,run=nginx [root@master ~]# kubectl label pods nginx-deploy-7c948bcff4-jclqg release=tye pod/nginx-deploy-7c948bcff4-jclqg labeled [root@master ~]# kubectl get pods -l release NAME READY STATUS RESTARTS AGE nginx 1/1 Running 3 (61m ago) 2d14h nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (61m ago) 2d [root@master ~]# kubectl get pods -l release --show-labels NAME READY STATUS RESTARTS AGE LABELS nginx 1/1 Running 3 (61m ago) 2d14h release=edison,run=nginx nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (61m ago) 2d app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye [root@master ~]# kubectl get pods -l release,app --show-labels NAME READY STATUS RESTARTS AGE LABELS nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (62m ago) 2d app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye [root@master ~]# kubectl get pods -l release=edison,run=nginx --show-labels NAME READY STATUS RESTARTS AGE LABELS nginx 1/1 Running 3 (62m ago) 2d14h release=edison,run=nginx [root@master ~]# kubectl get pods -l release!=edison --show-labels NAME READY STATUS RESTARTS AGE LABELS client 0/1 Error 0 47h run=client client-7c75c79d7c-h9994 0/1 CrashLoopBackOff 49 (4m54s ago) 47h app=client,pod-template-hash=7c75c79d7c nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (63m ago) 2d app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye nginx-deploy-7c948bcff4-mczxp 1/1 Running 2 (63m ago) 2d app=nginx-deploy,pod-template-hash=7c948bcff4 #Find the pod with the label release edison or 
type [root@master ~]# kubectl get pods -l " release in (edison,tye)" --show-labels NAME READY STATUS RESTARTS AGE LABELS nginx 1/1 Running 3 (76m ago) 2d14h release=edison,run=nginx nginx-deploy-7c948bcff4-jclqg 1/1 Running 2 (76m ago) 2d app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye #Find the pod with label release other than edison and type [root@master ~]# kubectl get pods -l " release notin (edison,tye)" --show-labels NAME READY STATUS RESTARTS AGE LABELS client 0/1 Error 0 2d run=client client-7c75c79d7c-h9994 0/1 CrashLoopBackOff 52 (4m5s ago) 2d app=client,pod-template-hash=7c75c79d7c nginx-deploy-7c948bcff4-mczxp 1/1 Running 2 (77m ago) 2d app=nginx-deploy,pod-template-hash=7c948bcff4,release=dizzy #View the node list and display the label [root@master ~]# kubectl get node --show-labels NAME STATUS ROLES AGE VERSION LABELS master Ready control-plane,master 2d23h v1.22.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers= node1 Ready <none> 2d23h v1.22.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux node2 Ready <none> 2d23h v1.22.3 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux
Resource allocation list
Autonomous Pod resources
Pod resources:
spec.containers <[]Object>
- name
image
imagePullPolicy
Always: always pull the image from the registry; this is the default when the image tag is :latest
Never: never download
IfNotPresent: this option is used if the image does not use latest. If it exists locally, it will not be downloaded. If it does not exist, it will be downloaded from the registry.
command:
labels:
key = value
key: letters, numbers, underscores (_), hyphens (-) and dots (.)
value: may be empty; must begin and end with a letter or number; letters, numbers, underscores, hyphens and dots may be used in the middle
Label selector:
Equality-based: =, ==, !=
Set relationship:
key in (value1,value2...)
key notin (value1,value2...)
key
!key: the key does not exist
Many resources support inline field definition label selectors
matchLabels: give key values directly
matchExpressions: define the use label selector based on the given expression, {key: "key", operator: "operator", values:[value1,value2...]}
Operator:
In, NotIn: the values field must be a non-empty list
Exists, DoesNotExist: the values field must be an empty list
[root@master ~]# kubectl explain pods.spec | grep nodeSelector nodeSelector <map[string]string>
Node label selector: the resource runs only on nodes whose labels match the selector
nodeSelector <map[string]string>
NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on
that node. More info:
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
Specify node run resources
nodeName
NodeName is a request to schedule this pod onto a specific node. If it is
non-empty, the scheduler simply schedules this pod onto that node, assuming
that it fits resource requirements.
annotations:
Unlike label, it cannot be used to select resource objects, but only to provide "metadata" for objects
#Add the following line to the metadata of the yaml file annotations: tye.com/createby: cluster admin [root@master manifests]# kubectl describe pods pod-demo Name: pod-demo Namespace: default Priority: 0 Node: node2/192.168.88.103 Start Time: Tue, 09 Nov 2021 10:07:07 -0500 Labels: app=myapp tier=frontend Annotations: tye.com/createby: cluster admin
Pod life cycle:
Status:
Pending: the Pod has been accepted, but scheduling has not been completed yet
Running: the Pod has been bound to a node and its containers are running
Failed: all containers have terminated, and at least one terminated in failure
Succeeded: all containers terminated successfully and will not be restarted
Unknown: the state of the Pod could not be obtained (typically the apiserver cannot reach the node's kubelet)
Important behaviors in Pod life cycle:
Primary container
Container detection
liveness: checks whether the container is still alive; the container is restarted when the probe fails
readiness: checks whether the container is ready to serve requests; the Pod is removed from Service endpoints when the probe fails
Restart policy: restart policy
Always: automatic restart
OnFailure:
Never: do not restart
Default to Always:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
Probe type
- ExecAction
- TCPSocketAction
- HTTPGetAction
#Exec probes yaml files to determine whether the directory exists apiVersion: v1 kind: Pod metadata: name: liveness-exec-pod namespace: default spec: containers: - name: liveness-exec-container image: busybox:latest imagePullPolicy: IfNotPresent command: ["/bin/sh","-c","touch /tmp/healty;sleep 30;rm -rf /tmp/healty;sleep 3600"] livenessProbe: exec: command: ["test","-e","/tmp/healty"] initialDelaySeconds: 2 periodSeconds: 3 #View pod information [root@master manifests]# kubectl describe pod liveness-exec-pod Name: liveness-exec-pod Namespace: default Priority: 0 Node: node1/192.168.88.102 Start Time: Wed, 10 Nov 2021 08:51:30 -0500 Labels: <none> Annotations: <none> Status: Running IP: 10.244.3.14 IPs: IP: 10.244.3.14 Containers: liveness-exec-container: Container ID: docker://09cfbe11120e3ff96c0cacfc5f83569a599dcfad1486581ce3215f3bc36d6343 Image: busybox:latest Image ID: docker-pullable://busybox@sha256:139abcf41943b8bcd4bc5c42ee71ddc9402c7ad69ad9e177b0a9bc4541f14924 Port: <none> Host Port: <none> Command: /bin/sh -c touch /tmp/healty;sleep 30;rm -rf /tmp/healty;sleep 3600 State: Running Started: Wed, 10 Nov 2021 08:53:48 -0500 Last State: Terminated Reason: Error Exit Code: 137 Started: Wed, 10 Nov 2021 08:52:39 -0500 Finished: Wed, 10 Nov 2021 08:53:48 -0500 Ready: True Restart Count: 2 Liveness: exec [test -e /tmp/healty] delay=2s timeout=1s period=3s #success=1 #failure=3 Environment: <none> Mounts: /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-t76fk (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: kube-api-access-t76fk: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: <nil> DownwardAPI: true QoS Class: BestEffort Node-Selectors: <none> Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type 
Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 2m44s default-scheduler Successfully assigned default/liveness-exec-pod to node1 Warning Unhealthy 56s (x6 over 2m11s) kubelet Liveness probe failed: Normal Killing 56s (x2 over 2m5s) kubelet Container liveness-exec-container failed liveness probe, will be restarted Normal Pulled 26s (x3 over 2m44s) kubelet Container image "busybox:latest" already present on machine Normal Created 26s (x3 over 2m44s) kubelet Created container liveness-exec-container Normal Started 26s (x3 over 2m44s) kubelet Started container liveness-exec-container
readiness
#Readiness yaml file apiVersion: v1 kind: Pod metadata: name: readiness-httpget-pod namespace: default spec: containers: - name: readiness-exec-container image: nginx:1.14 imagePullPolicy: IfNotPresent ports: - name: http containerPort: 80 readinessProbe: httpGet: port: http path: /index.html initialDelaySeconds: 2 periodSeconds: 3
Pod controller
ReplicaSet: manages stateless Pod replicas
- Number of Pod copies expected by the user
- Tag selector to service and manage Pod resources
- Pod template:
Deployment: control ReplicaSet
DaemonSet:
Job: one time job
CronJob:
StatefulSet:
TPR(Third Party Resources)
CRD (Custom Resource Definitions)
Operator:
Helm:
Creating a ReplicaSet from a yaml file
apiVersion: apps/v1 kind: ReplicaSet metadata: name: myapp namespace: default spec: replicas: 2 selector: matchLabels: app: myapp release: tye template: metadata: name: myapp-pod labels: app: myapp release: tye enviroment: qa spec: containers: - name: myapp-container image: nginx:1.14 ports: - name: http containerPort: 80 [root@master manifests]# kubectl create -f rs-demo.yaml replicaset.apps/myapp created #View ReplicaSet [root@master manifests]# kubectl get rs NAME DESIRED CURRENT READY AGE client-7c75c79d7c 1 1 0 3d myapp 2 2 2 5s nginx-deploy-7c948bcff4 2 2 2 3d1h [root@master manifests]# kubectl get pods NAME READY STATUS RESTARTS AGE myapp-5v76c 1/1 Running 0 8m16s myapp-ftfnd 1/1 Running 0 8m16s #RS expansion #Modify the number of replicas = [num] via kubectl edit rs [rsname]
#deploy-pod.yaml apiVersion: apps/v1 kind: Deployment metadata: name: myapp-deploy namespace: default spec: replicas: 3 selector: matchLabels: app: myapp release: tye template: metadata: labels: app: myapp release: tye spec: containers: - name: myapp image: ikubernetes/myapp:v2 ports: - name: http containerPort: 80 #Apply a configuration to the resource, create if the resource does not exist, and update if it exists [root@master manifests]# kubectl apply -f deploy-pod.yaml #kubectl patch patching [root@master ~]# kubectl patch deploy myapp-deploy -p '{"spec":{"replicas":5}}' deployment.apps/myapp-deploy patched [root@master ~]# kubectl get pods NAME READY STATUS RESTARTS AGE myapp-deploy-545b89888c-6gd2h 1/1 Running 0 11m myapp-deploy-545b89888c-r49rk 1/1 Running 0 10m myapp-deploy-545b89888c-svswl 1/1 Running 0 4s myapp-deploy-545b89888c-vz967 1/1 Running 0 4s myapp-deploy-545b89888c-zxcr5 1/1 Running 0 10m [root@master manifests]# kubectl patch deployment myapp-deploy -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}' deployment.apps/myapp-deploy patched [root@master manifests]# kubectl describe deploy myapp-deploy Name: myapp-deploy Namespace: default CreationTimestamp: Thu, 11 Nov 2021 08:57:29 -0500 Labels: <none> Annotations: deployment.kubernetes.io/revision: 2 Selector: app=myapp,release=tye Replicas: 5 desired | 5 updated | 5 total | 5 available | 0 unavailable StrategyType: RollingUpdate MinReadySeconds: 0 RollingUpdateStrategy: 0 max unavailable, 1 max surge [root@master manifests]# kubectl set image deployment myapp-deploy myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-deploy deployment.apps/myapp-deploy image updated deployment.apps/myapp-deploy paused [root@master ~]# kubectl get pods -l app=myapp -w NAME READY STATUS RESTARTS AGE myapp-deploy-545b89888c-6gd2h 1/1 Running 0 21m myapp-deploy-545b89888c-r49rk 1/1 Running 0 20m myapp-deploy-545b89888c-svswl 1/1 Running 0 10m myapp-deploy-545b89888c-vz967 
1/1 Running 0 10m myapp-deploy-545b89888c-zxcr5 1/1 Running 0 20m myapp-deploy-fbd4c499b-m9258 0/1 Pending 0 0s myapp-deploy-fbd4c499b-m9258 0/1 Pending 0 0s myapp-deploy-fbd4c499b-m9258 0/1 ContainerCreating 0 0s myapp-deploy-fbd4c499b-m9258 1/1 Running 0 18s #Continue update [root@master manifests]# kubectl rollout resume deployment myapp-deploy deployment.apps/myapp-deploy resumed #Monitor deployment [root@master manifests]# kubectl rollout status deployment myapp-deploy Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated... [root@master manifests]# kubectl get rs -o wide NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR myapp-deploy-545b89888c 0 0 0 28m myapp ikubernetes/myapp:v2 app=myapp,pod-template-hash=545b89888c,release=tye myapp-deploy-7dfc7b4f66 0 0 0 52m myapp ikubernetes/myapp:v1 app=myapp,pod-template-hash=7dfc7b4f66,release=tye myapp-deploy-fbd4c499b 5 5 5 4m24s myapp ikubernetes/myapp:v3 app=myapp,pod-template-hash=fbd4c499b,release=tye #View rollback history version [root@master manifests]# kubectl rollout history deployment myapp-deploy deployment.apps/myapp-deploy REVISION CHANGE-CAUSE 1 <none> 2 <none> 3 <none> #Rollback to version 1 [root@master manifests]# kubectl rollout undo deployment myapp-deploy --to-revision=1 deployment.apps/myapp-deploy rolled back [root@master manifests]# kubectl rollout history deployment myapp-deploy deployment.apps/myapp-deploy REVISION CHANGE-CAUSE 2 <none> 3 <none> 4 <none> #Check that the deployment currently working is v1 [root@master manifests]# kubectl get rs -o wide NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR myapp-deploy-545b89888c 0 0 0 34m myapp ikubernetes/myapp:v2 app=myapp,pod-template-hash=545b89888c,release=tye myapp-deploy-7dfc7b4f66 5 5 5 58m myapp ikubernetes/myapp:v1 app=myapp,pod-template-hash=7dfc7b4f66,release=tye myapp-deploy-fbd4c499b 0 0 0 10m myapp ikubernetes/myapp:v3 app=myapp,pod-template-hash=fbd4c499b,release=tye
DaemonSet
[root@node1 ~]# docker pull ikubernetes/filebeat:5.6.5-alpine 5.6.5-alpine: Pulling from ikubernetes/filebeat e6faa08065ed: Pull complete 1956334c4aa9: Pull complete f3f537aca9dd: Pull complete Digest: sha256:530f31ebf9194b0400303320579cafe21da890bd06446746fcedc2b65875e4eb Status: Downloaded newer image for ikubernetes/filebeat:5.6.5-alpine docker.io/ikubernetes/filebeat:5.6.5-alpine # yaml file for DaemonSet apiVersion: apps/v1 kind: Deployment metadata: name: redis namespace: default spec: replicas: 1 selector: matchLabels: app: redis role: logstor template: metadata: labels: app: redis role: logstor spec: containers: - name: redis image: redis:4.0-alpine ports: - name: redis containerPort: 6379 --- apiVersion: apps/v1 kind: DaemonSet metadata: name: myapp-ds namespace: default spec: selector: matchLabels: app: filebeat release: stable template: metadata: labels: app: filebeat release: stable spec: containers: - name: myapp image: ikubernetes/filebeat:5.6.5-alpine env: - name: REDIS_HOST value: redis.default.svc.cluster.local - name: REDIS_LOG_LEVEL value: info [root@master manifests]# kubectl apply -f ds-demo.yaml daemonset.apps/myapp-ds created