The default service discovery mechanism in k8s automatically maps a Service name to its ClusterIP through DNS.
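In other words, a Service named svc in namespace ns becomes resolvable as svc.ns.svc.cluster.local once a cluster DNS add-on is running. As a quick illustration (this only works after the CoreDNS deployment set up below; the built-in kubernetes API Service is used as the example, and 192.168.0.2 is the cluster DNS address chosen in this section; the command should return that Service's ClusterIP):

~]# dig -t A kubernetes.default.svc.cluster.local. @192.168.0.2 +short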
Deploy an HTTP service to serve the resource manifests for k8s:
On the hdss7-200 host, configure an nginx virtual host to provide a unified access point for resource manifests on the k8s intranet:
[root@hdss7-200 conf.d]# cat k8s-yaml.od.com.conf
server {
    listen       80;
    server_name  k8s-yaml.od.com;

    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
    }
}
[root@hdss7-200 conf.d]# nginx -s reload
[root@hdss7-200 conf.d]# mkdir -p /data/k8s-yaml/coredns
Configure domain name resolution on the intranet DNS server:
named]# vi od.com.zone
k8s-yaml           A    10.4.7.200
named]# systemctl restart named
Resolution verification:
named]# dig @10.4.7.11 k8s-yaml.od.com +short
10.4.7.200
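At this point the virtual host can already be reached from any intranet machine; the coredns directory will simply be empty until the manifests are created below (an optional quick check):

~]# curl -s http://k8s-yaml.od.com/coredns/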
Install and deploy CoreDNS, delivering the software to k8s as a container:
Pull the official image from Docker Hub, then tag and push it to the Harbor private registry:
[root@hdss7-200 k8s-yaml]# docker pull coredns/coredns:1.6.1
[root@hdss7-200 k8s-yaml]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@hdss7-200 k8s-yaml]# docker push harbor.od.com/public/coredns:v1.6.1
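The image ID passed to docker tag (c0f6e815079e here) is the ID of the freshly pulled image; if needed, look it up first:

[root@hdss7-200 k8s-yaml]# docker images | grep coredns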
Prepare the resource manifests:
# pwd
/data/k8s-yaml/coredns
# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
# cat dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
# cat cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.4.7.11
        cache 30
        loop
        reload
        loadbalance
    }
# cat svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP
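For reference, the Corefile in cm.yaml enables the following standard CoreDNS plugins (annotated copy for explanation only, not part of the manifest):

.:53 {
    errors                                   # log errors to stdout
    log                                      # log every query
    health                                   # liveness endpoint on :8080/health (matches the livenessProbe in dp.yaml)
    ready                                    # readiness endpoint on :8181/ready
    kubernetes cluster.local 192.168.0.0/16  # answer for the cluster zone and the Service CIDR's reverse zone
    forward . 10.4.7.11                      # forward everything else to the intranet bind server
    cache 30                                 # cache answers for 30 seconds
    loop                                     # detect and abort on forwarding loops
    reload                                   # reload the Corefile when it changes
    loadbalance                              # round-robin the order of A/AAAA records
}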
Apply the resource manifests using the declarative resource management approach:
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
[root@hdss7-21 ~]# kubectl get all -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-6b6c4f9648-j7cv9   1/1     Running   0          34s

NAME              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   192.168.0.2   <none>        53/UDP,53/TCP,9153/TCP   31s

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           34s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-6b6c4f9648   1         1         1       34s
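If the pod does not reach Running, the logs usually point at a Corefile or RBAC problem (an optional check; the pod name is the one shown above and will differ on your cluster):

[root@hdss7-21 ~]# kubectl -n kube-system logs coredns-6b6c4f9648-j7cv9
[root@hdss7-21 ~]# kubectl -n kube-system describe pod coredns-6b6c4f9648-j7cv9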
Verify that CoreDNS resolution works:
~]# dig -t A www.baidu.com +short @192.168.0.2
www.a.shifen.com.
14.215.177.39
14.215.177.38
~]# dig -t A harbor.od.com +short @192.168.0.2
10.4.7.200
[root@hdss7-21 ~]# kubectl get svc -n kube-public
NAME       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
nginx-dp   ClusterIP   192.168.47.82   <none>        80/TCP    10h
Outside the cluster (for example, from a node), the Service's IP address must be resolved with its fully qualified domain name:
[root@hdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @192.168.0.2 +short
192.168.47.82
However, inside the cluster, the short domain name can be used. Look at the resolver configuration file inside a Pod:
[root@hdss7-21 ~]# kubectl get pod -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-ds-rxfqd   1/1     Running   0          11h   172.7.22.2   hdss7-22.host.com   <none>           <none>
nginx-ds-xm5l2   1/1     Running   0          11h   172.7.21.2   hdss7-21.host.com   <none>           <none>
[root@hdss7-21 ~]# kubectl exec -it nginx-ds-xm5l2 -n default bash
By default, k8s configures the search domains for us, so a short domain name is enough to resolve the Service IP:
root@nginx-ds-xm5l2:/# cat /etc/resolv.conf
nameserver 192.168.0.2
search default.svc.cluster.local svc.cluster.local cluster.local host.com
options ndots:5
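Because of these search domains (and ndots:5), a short name such as nginx-dp.kube-public is expanded to nginx-dp.kube-public.svc.cluster.local automatically. A quick check from inside the pod (a sketch; getent is assumed to be available in the Debian-based nginx image, and it should print the 192.168.47.82 ClusterIP seen earlier):

root@nginx-ds-xm5l2:/# getent hosts nginx-dp.kube-public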