6. k8s component configuration (different from point 4)
Create the following directories on all k8s nodes
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
6.1. Create apiserver (all master nodes)
6.1.1 master01 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.1.30 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF
6.1.2 master02 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.1.31 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF
6.1.3 master03 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.1.32 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.1.30:2379,https://192.168.1.31:2379,https://192.168.1.32:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF
6.1.4 start apiserver (all master nodes)
systemctl daemon-reload && systemctl enable --now kube-apiserver

# Check that the service started and is running normally
systemctl status kube-apiserver
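As an extra sanity check (a sketch, not part of the original procedure), the API server can also be probed directly. With the default RBAC bindings, unauthenticated clients are allowed to read /healthz, so the second command should print "ok":

# confirm the secure port is listening
ss -lntp | grep 6443
# query the health endpoint (-k skips certificate verification)
curl -k https://127.0.0.1:6443/healthz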
6.2. Configure Kube Controller Manager Service
All master nodes use the same configuration. 172.16.0.0/12 is the pod network segment; change it to your own segment as required.

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=172.16.0.0/12 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
6.2.1 start Kube controller manager and check the status
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
6.3. Configure Kube Scheduler service
6.3.1 all master nodes are configured in the same way
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
6.3.2 start and view the service status
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler
7. TLS Bootstrapping configuration
7.1 configuring on master01
cd /root/Kubernetes/bootstrap

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.88:8443 \
  --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-credentials tls-bootstrap-token-user \
  --token=c8ad9c.2e4d610cf3e7426e \
  --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-context tls-bootstrap-token-user@kubernetes \
  --cluster=kubernetes \
  --user=tls-bootstrap-token-user \
  --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config use-context tls-bootstrap-token-user@kubernetes \
  --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# The token is defined in bootstrap.secret.yaml; if you change it there, change it here as well
mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
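If you do change the bootstrap token, the value used above must match the token-id and token-secret fields in bootstrap.secret.yaml (the part before the dot is the token-id, the part after is the token-secret). A quick way to compare them, assuming the file keeps the upstream field names:

grep -E "token-id|token-secret" bootstrap.secret.yaml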
7.2 check the cluster status; if everything is healthy, continue with the following steps
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true","reason":""}
scheduler            Healthy   ok
etcd-1               Healthy   {"health":"true","reason":""}
etcd-2               Healthy   {"health":"true","reason":""}

kubectl create -f bootstrap.secret.yaml
8. node configuration
8.1. Copy the certificates from master01 to the other nodes
cd /etc/kubernetes/

for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
  ssh $NODE mkdir -p /etc/kubernetes/pki
  for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
    scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
  done
done
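Optionally, confirm the files actually landed on every node before continuing (a minimal check reusing the same node list):

for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
  echo "=== $NODE ==="
  ssh $NODE ls /etc/kubernetes/pki/ /etc/kubernetes/bootstrap-kubelet.kubeconfig
done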
8.2. kubelet configuration
8.2.1 create relevant directories for all k8s nodes
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/

All k8s nodes configure the kubelet service:

cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
8.2.2 configure the kubelet service drop-in file on all k8s nodes
cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS
EOF
8.2.3 create kubelet configuration files for all k8s nodes
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
8.2.4 start kubelet
systemctl daemon-reload
systemctl restart kubelet
systemctl enable --now kubelet
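If a node fails to register, the kubelet log and the pending certificate signing requests are the first things to look at (a troubleshooting sketch, not a required step). With the bindings created by bootstrap.secret.yaml, the bootstrap CSRs should be approved automatically:

# on the failing node: inspect the kubelet log
journalctl -xeu kubelet
# on master01: the bootstrap CSRs should appear here
kubectl get csr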
8.2.5 viewing clusters
kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    <none>   80s   v1.23.4
k8s-master02   Ready    <none>   78s   v1.23.4
k8s-master03   Ready    <none>   74s   v1.23.4
k8s-node01     Ready    <none>   86s   v1.23.4
k8s-node02     Ready    <none>   95s   v1.23.4
k8s-node03     Ready    <none>   87s   v1.23.4
k8s-node04     Ready    <none>   65s   v1.23.4
k8s-node05     Ready    <none>   77s   v1.23.4
8.3. Kube proxy configuration
8.3.1 this step is only performed on master01
cd /root/Kubernetes/

kubectl -n kube-system create serviceaccount kube-proxy

kubectl create clusterrolebinding system:kube-proxy \
  --clusterrole system:node-proxier \
  --serviceaccount kube-system:kube-proxy

SECRET=$(kubectl -n kube-system get sa/kube-proxy \
  --output=jsonpath='{.secrets[0].name}')

JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
  --output=jsonpath='{.data.token}' | base64 -d)

PKI_DIR=/etc/kubernetes/pki
K8S_DIR=/etc/kubernetes

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.88:8443 \
  --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig

kubectl config set-credentials kubernetes \
  --token=${JWT_TOKEN} \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=kubernetes \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config use-context kubernetes \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
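Before copying the kubeconfig to the other nodes, it can be worth confirming that it points at the load-balancer address and carries the service-account token (an optional check):

kubectl config view --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig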
8.3.2 send kubeconfig to other nodes
for NODE in k8s-master02 k8s-master03; do
  scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
done

for NODE in k8s-node01 k8s-node02 k8s-node03 k8s-node04 k8s-node05; do
  scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
done
8.3.3 add Kube proxy configuration and service files to all k8s nodes
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
8.3.4 start Kube proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
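Because kube-proxy runs in ipvs mode here, you can confirm that virtual server rules are being programmed once services exist (an optional check; assumes the ipvsadm tool is installed, e.g. via yum install ipvsadm -y):

ipvsadm -Ln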
9. Install Calico
9.1 the following steps are performed only on master01
9.1.1 change calico network segment
cd /root/Kubernetes/calico/

# Replace the POD_CIDR placeholder with the pod network segment
sed -i "s#POD_CIDR#172.16.0.0/12#g" calico.yaml

grep "IPV4POOL_CIDR" calico.yaml -A 1
            - name: CALICO_IPV4POOL_CIDR
              value: "172.16.0.0/12"

# Create
kubectl apply -f calico.yaml
9.1.2 viewing container status
kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-6f6595874c-cf2sf   1/1     Running   0          14m
kube-system   calico-node-2xd2q                          1/1     Running   0          2m25s
kube-system   calico-node-jtzfh                          1/1     Running   0          2m3s
kube-system   calico-node-k4jkc                          1/1     Running   0          2m24s
kube-system   calico-node-msxwp                          1/1     Running   0          2m15s
kube-system   calico-node-tv849                          1/1     Running   0          2m12s
kube-system   calico-node-wdbzt                          1/1     Running   0          2m18s
kube-system   calico-node-x9sjr                          1/1     Running   0          2m33s
kube-system   calico-node-z2mz5                          1/1     Running   0          2m16s
kube-system   calico-typha-6b6cf8cbdf-gshvt              1/1     Running   0          14m
10. Install CoreDNS
10.1 the following steps are performed only on master01
10.1.1 modify the file
cd /root/Kubernetes/CoreDNS/

# Replace the KUBEDNS_SERVICE_IP placeholder with the cluster DNS address
sed -i "s#KUBEDNS_SERVICE_IP#10.96.0.10#g" coredns.yaml

cat coredns.yaml | grep clusterIP:
  clusterIP: 10.96.0.10
10.1.2 installation
kubectl create -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
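A quick way to confirm CoreDNS came up before moving on (a sketch; assumes the stock manifest's k8s-app=kube-dns label):

kubectl get pod -n kube-system -l k8s-app=kube-dns
kubectl get svc -n kube-system kube-dns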
11. Install Metrics Server
11.1 the following steps are performed only on master01
11.1.1 installing metrics server
In newer versions of Kubernetes, system resource metrics are collected by Metrics Server, which gathers memory, disk, CPU and network utilization for nodes and pods through the Metrics API.
Install metrics-server:

cd /root/Kubernetes/metrics-server/

kubectl create -f .
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
11.1.2 wait a moment, then check the status
kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master01   172m         2%     1307Mi          16%
k8s-master02   157m         1%     1189Mi          15%
k8s-master03   155m         1%     1105Mi          14%
k8s-node01     99m          1%     710Mi           9%
k8s-node02     79m          0%     585Mi           7%
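Pod-level metrics should become available at the same time; if the command below returns data, the aggregation layer (the front-proxy certificates configured on the apiserver) is working (an optional check):

kubectl top pod -A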
12. Cluster verification
12.1 deploy pod resources
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

# Check
kubectl get pod
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          17s
12.2 use the pod to resolve the kubernetes service in the default namespace
kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h

kubectl exec busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
12.3 test whether names can be resolved across namespaces
kubectl exec busybox -n default -- nslookup kube-dns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
12.4 every node must be able to reach the kubernetes service on port 443 and the kube-dns service on port 53
telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.

telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.

curl 10.96.0.10:53
curl: (52) Empty reply from server
12.5 pods should be able to communicate with each other
kubectl get po -owide
NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>

kubectl get po -n kube-system -owide
NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-5dffd5886b-4blh6   1/1     Running   0             77m   172.25.244.193   k8s-master01   <none>           <none>
calico-node-fvbdq                          1/1     Running   1 (75m ago)   77m   192.168.1.30     k8s-master01   <none>           <none>
calico-node-g8nqd                          1/1     Running   0             77m   192.168.1.33     k8s-node01     <none>           <none>
calico-node-mdps8                          1/1     Running   0             77m   192.168.1.34     k8s-node02     <none>           <none>
calico-node-nf4nt                          1/1     Running   0             77m   192.168.1.32     k8s-master03   <none>           <none>
calico-node-sq2ml                          1/1     Running   0             77m   192.168.1.31     k8s-master02   <none>           <none>
calico-typha-8445487f56-mg6p8              1/1     Running   0             77m   192.168.1.34     k8s-node02     <none>           <none>
calico-typha-8445487f56-pxbpj              1/1     Running   0             77m   192.168.1.30     k8s-master01   <none>           <none>
calico-typha-8445487f56-tnssl              1/1     Running   0             77m   192.168.1.33     k8s-node01     <none>           <none>
coredns-5db5696c7-67h79                    1/1     Running   0             63m   172.25.92.65     k8s-master02   <none>           <none>
metrics-server-6bf7dcd649-5fhrw            1/1     Running   0             61m   172.18.195.1     k8s-master03   <none>           <none>

# Enter busybox and ping pods on other nodes
kubectl exec -ti busybox -- sh
/ # ping 192.168.1.33
PING 192.168.1.33 (192.168.1.33): 56 data bytes
64 bytes from 192.168.1.33: seq=0 ttl=63 time=0.358 ms
64 bytes from 192.168.1.33: seq=1 ttl=63 time=0.668 ms
64 bytes from 192.168.1.33: seq=2 ttl=63 time=0.637 ms
64 bytes from 192.168.1.33: seq=3 ttl=63 time=0.624 ms
64 bytes from 192.168.1.33: seq=4 ttl=63 time=0.907 ms

# This proves that pods can communicate across namespaces and across hosts
12.6 create three replicas and verify that they are spread across different nodes (delete them when you are done)
cat > deployments.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
EOF

kubectl apply -f deployments.yaml
deployment.apps/nginx-deployment created

kubectl get pod
NAME                               READY   STATUS    RESTARTS   AGE
busybox                            1/1     Running   0          6m25s
nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s

# Delete nginx
[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
13. Install dashboard
cd /root/Kubernetes/dashboard/

kubectl create -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
13.1 create administrator user
cat > admin.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
13.2 apply the yaml file
kubectl apply -f admin.yaml -n kube-system
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
13.3 change the dashboard svc type to NodePort; if it is already NodePort, skip this step
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

# change the service type field to:
  type: NodePort
13.4 viewing port number
kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.98.201.22   <none>        443:31245/TCP   10m
13.5 viewing the token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-k545k
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: c308071c-4cf5-4583-83a2-eaf7812512b4

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InYzV2dzNnQzV3hHb2FQWnYzdnlOSmpudmtpVmNjQW5VM3daRi12SFM4dEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs1NDVrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMzA4MDcxYy00Y2Y1LTQ1ODMtODNhMi1lYWY3ODEyNTEyYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.pshvZPi9ZJkXUWuWilcYs1wawTpzV-nMKesgF3d_l7qyTPaK2N5ofzIThd0SjzU7BFNb4_rOm1dw1Be5kLeHjY_YW5lDnM5TAxVPXmZQ0HJ2pAQ0pjQqCHFnPD0bZFIYkeyz8pZx0Hmwcd3ZdC1yztr0ADpTAmMgI9NC2ZFIeoFFo4Ue9ZM_ulhqJQjmgoAlI_qbyjuKCNsWeEQBwM6HHHAsH1gOQIdVxqQ83OQZUuynDQRpqlHHFIndbK2zVRYFA3GgUnTu2-VRQ-DXBFRjvZR5qArnC1f383jmIjGT6VO7l04QJteG_LFetRbXa-T4mcnbsd8XutSgO0INqwKpjw
ca.crt:     1363 bytes
namespace:  11 bytes
13.6 log in to the dashboard with the NodePort URL and the token above
https://192.168.1.30:31245/
eyJhbGciOiJSUzI1NiIsImtpZCI6InYzV2dzNnQzV3hHb2FQWnYzdnlOSmpudmtpVmNjQW5VM3daRi12SFM4dEEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs1NDVrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMzA4MDcxYy00Y2Y1LTQ1ODMtODNhMi1lYWY3ODEyNTEyYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.pshvZPi9ZJkXUWuWilcYs1wawTpzV-nMKesgF3d_l7qyTPaK2N5ofzIThd0SjzU7BFNb4_rOm1dw1Be5kLeHjY_YW5lDnM5TAxVPXmZQ0HJ2pAQ0pjQqCHFnPD0bZFIYkeyz8pZx0Hmwcd3ZdC1yztr0ADpTAmMgI9NC2ZFIeoFFo4Ue9ZM_ulhqJQjmgoAlI_qbyjuKCNsWeEQBwM6HHHAsH1gOQIdVxqQ83OQZUuynDQRpqlHHFIndbK2zVRYFA3GgUnTu2-VRQ-DXBFRjvZR5qArnC1f383jmIjGT6VO7l04QJteG_LFetRbXa-T4mcnbsd8XutSgO0INqwKpjw
14. Install command-line auto-completion
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
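Optionally, the same completion can be wired to a short alias (a convenience sketch following the pattern from the kubectl documentation, not required):

echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc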
Appendix:
Configure kube-controller-manager to sign certificates with a 100-year validity (optional; it can be added up front).
vim /usr/lib/systemd/system/kube-controller-manager.service

# Find the [Service] section and add:
      --cluster-signing-duration=876000h0m0s \

# Restart
systemctl daemon-reload
systemctl restart kube-controller-manager
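Only certificates issued after the restart pick up the new duration; existing certificates keep their original expiry until they are rotated. One way to inspect a freshly issued kubelet client certificate (a sketch, assuming the default kubelet pki path):

openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates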
Prevent vulnerability scanning
vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf

[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.kubeconfig --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"
Environment="KUBELET_EXTRA_ARGS=--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --image-pull-progress-deadline=30m"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
Reserve resources for the system and the kubelet; adjust the values as needed
vim /etc/kubernetes/kubelet-conf.yml

rotateServerCertificates: true
allowedUnsafeSysctls:
 - "net.core*"
 - "net.ipv4.*"
kubeReserved:
  cpu: "1"
  memory: 1Gi
  ephemeral-storage: 10Gi
systemReserved:
  cpu: "1"
  memory: 1Gi
  ephemeral-storage: 10Gi
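After restarting the kubelet, the reservations show up as the difference between a node's Capacity and Allocatable values (an optional check; replace the node name with one of your own):

kubectl describe node k8s-node01 | grep -A 8 Allocatable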
Keep the data disk separate from the system disk, and run etcd on SSD storage.