diff --git a/kubernetes-HA/基于二进制构建Kubernetes高可用集群.md b/kubernetes-HA/基于二进制构建Kubernetes高可用集群.md index 790908c..7664379 100644 --- a/kubernetes-HA/基于二进制构建Kubernetes高可用集群.md +++ b/kubernetes-HA/基于二进制构建Kubernetes高可用集群.md @@ -1062,10 +1062,10 @@ users: null 配置 admin ```shell -[root@k8s-master1 k8s-work]# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config User "admin" set. -[root@k8s-master1 k8s-work]# cat kube.config +[root@xingdiancloud-native-master-a k8s-work]# cat kube.config apiVersion: v1 clusters: - cluster: @@ -1086,9 +1086,9 @@ users: 创建上下文 ```shell -[root@k8s-master1 k8s-work]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config Context "kubernetes" created. 
-[root@k8s-master1 k8s-work]# cat kube.config +[root@xingdiancloud-native-master-a k8s-work]# cat kube.config apiVersion: v1 clusters: - cluster: @@ -1112,9 +1112,1253 @@ users: ##### 6.5 准备kubectl配置文件并进行角色绑定 +```shell +[root@xingdiancloud-native-master-a k8s-work]# mkdir ~/.kube +[root@xingdiancloud-native-master-a k8s-work]# cp kube.config ~/.kube/config +[root@xingdiancloud-native-master-a k8s-work]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config +``` + +##### 6.6 查看集群状态 + +查看集群信息 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl cluster-info +``` + +查看集群组件状态 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl get componentstatuses +``` + +查看命名空间中资源对象 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl get all --all-namespaces +``` + +##### 6.7 同步kubectl配置文件到集群其它master节点 + +```shell +xingdiancloud-native-master-b 节点上,创建文件夹 +[root@xingdiancloud-native-master-b ~]# mkdir /root/.kube + +把配置文件同步过去 +[root@xingdiancloud-native-master-a ~]# scp /root/.kube/config xingdiancloud-native-master-b:/root/.kube/config +``` + +#### 7.部署kube-controller-manager + +##### 7.1 创建kube-controller-manager证书请求文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cat > kube-controller-manager-csr.json << "EOF" +{ + "CN": "system:kube-controller-manager", + "key": { + "algo": "rsa", + "size": 2048 + }, + "hosts": [ + "127.0.0.1", + "10.9.12.60", + "10.9.12.64" + ], + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-controller-manager", + "OU": "system" + } + ] +} +EOF +``` + +##### 7.2 创建kube-controller-manager证书文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager +``` + +注意: + +​ kube-controller-manager.csr + +​ 
kube-controller-manager-csr.json + +​ kube-controller-manager-key.pem + +​ kube-controller-manager.pem + +##### 7.3 创建kube-controller-manager的kube-controller-manager.kubeconfig + +```shell +[root@xingdiancloud-native-master-a ~]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.9.12.100:6443 --kubeconfig=kube-controller-manager.kubeconfig + +[root@xingdiancloud-native-master-a ~]# kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig + +[root@xingdiancloud-native-master-a ~]# kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig + +[root@xingdiancloud-native-master-a ~]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig +``` + +##### 7.4 创建kube-controller-manager配置文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cat > kube-controller-manager.conf << "EOF" +KUBE_CONTROLLER_MANAGER_OPTS=" \ + --secure-port=10257 \ + --bind-address=127.0.0.1 \ + --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ + --service-cluster-ip-range=10.96.0.0/16 \ + --cluster-name=kubernetes \ + --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \ + --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \ + --allocate-node-cidrs=true \ + --cluster-cidr=10.244.0.0/16 \ + --root-ca-file=/etc/kubernetes/ssl/ca.pem \ + --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \ + --leader-elect=true \ + --feature-gates=RotateKubeletServerCertificate=true \ + --controllers=*,bootstrapsigner,tokencleaner \ + --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \ + --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \ + 
--use-service-account-credentials=true \ + --v=2" +EOF +``` + +##### 7.5 创建服务启动文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cat > kube-controller-manager.service << "EOF" +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +EnvironmentFile=/etc/kubernetes/kube-controller-manager.conf +ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +``` + +##### 7.6 同步文件到集群master节点 + +内部拷贝 + +```shell +[root@xingdiancloud-native-master-a ~]# cp kube-controller-manager*.pem /etc/kubernetes/ssl/ +[root@xingdiancloud-native-master-a ~]# cp kube-controller-manager.kubeconfig /etc/kubernetes/ +[root@xingdiancloud-native-master-a ~]# cp kube-controller-manager.conf /etc/kubernetes/ +[root@xingdiancloud-native-master-a ~]# cp kube-controller-manager.service /usr/lib/systemd/system/ +``` + +远程拷贝 + +```shell +[root@xingdiancloud-native-master-a ~]# scp kube-controller-manager*.pem xingdiancloud-native-master-b:/etc/kubernetes/ssl/ + +[root@xingdiancloud-native-master-a ~]# scp kube-controller-manager.kubeconfig kube-controller-manager.conf xingdiancloud-native-master-b:/etc/kubernetes/ + +[root@xingdiancloud-native-master-a ~]# scp kube-controller-manager.service xingdiancloud-native-master-b:/usr/lib/systemd/system/ +``` + +#### 8.部署kube-scheduler + +##### 8.1 创建kube-scheduler证书请求文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cat > kube-scheduler-csr.json << "EOF" +{ + "CN": "system:kube-scheduler", + "hosts": [ + "127.0.0.1", + "10.9.12.60", + "10.9.12.64" + ], + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "system:kube-scheduler", + "OU": "system" + } + ] +} +EOF +``` + +##### 8.2 生成kube-scheduler证书 + +```shell +[root@xingdiancloud-native-master-a ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json 
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler +``` + +注意: + +​ kube-scheduler.csr + +​ kube-scheduler-csr.json + +​ kube-scheduler-key.pem + +​ kube-scheduler.pem + +```shell +[root@xingdiancloud-native-master-a ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler +``` + +##### 8.3 创建kube-scheduler的kubeconfig + +```shell +[root@xingdiancloud-native-master-a ~]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.9.12.100:6443 --kubeconfig=kube-scheduler.kubeconfig + +[root@xingdiancloud-native-master-a ~]# kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig + +[root@xingdiancloud-native-master-a ~]# kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig + +[root@xingdiancloud-native-master-a ~]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig +``` + +##### 8.4 创建服务配置文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cat > kube-scheduler.conf << "EOF" +KUBE_SCHEDULER_OPTS=" \ +--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ +--leader-elect=true \ +--v=2" +EOF +``` + +##### 8.5 创建服务启动配置文件 + +```shell +[root@xingdiancloud-native-master-a ~]# cat > kube-scheduler.service << "EOF" +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf +ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +``` + +##### 8.6 同步文件至集群master节点 + +内部拷贝 + +```shell +[root@xingdiancloud-native-master-a ~]# cp kube-scheduler*.pem /etc/kubernetes/ssl/ 
+[root@xingdiancloud-native-master-a ~]# cp kube-scheduler.kubeconfig /etc/kubernetes/ +[root@xingdiancloud-native-master-a ~]# cp kube-scheduler.conf /etc/kubernetes/ +[root@xingdiancloud-native-master-a ~]# cp kube-scheduler.service /usr/lib/systemd/system/ +``` + +外部拷贝 + +```shell +[root@xingdiancloud-native-master-a ~]# scp kube-scheduler*.pem xingdiancloud-native-master-b:/etc/kubernetes/ssl/ + +[root@xingdiancloud-native-master-a ~]# scp kube-scheduler.kubeconfig kube-scheduler.conf xingdiancloud-native-master-b:/etc/kubernetes/ + +[root@xingdiancloud-native-master-a ~]# scp kube-scheduler.service xingdiancloud-native-master-b:/usr/lib/systemd/system/ +``` + +##### 8.7 启动服务 + +注意:所有master节点 + +``` +[root@xingdiancloud-native-master-a ~]# systemctl daemon-reload +[root@xingdiancloud-native-master-a ~]# systemctl enable --now kube-scheduler +[root@xingdiancloud-native-master-a ~]# systemctl status kube-scheduler + +[root@xingdiancloud-native-master-a ~]# systemctl status kube-scheduler +● kube-scheduler.service - Kubernetes Scheduler + Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled) + Active: active (running) since Thu 2024-01-04 16:45:18 CST; 1min 9s ago + Docs: https://github.com/kubernetes/kubernetes + Main PID: 6131 (kube-scheduler) + CGroup: /system.slice/kube-scheduler.service + └─6131 /usr/local/bin/kube-scheduler --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig --leader-elect=true --v=2 + +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: schedulerName: default-scheduler +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: > +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.437013 6131 server.go:154] "Starting Kubernetes Scheduler" version="v1.28.0" +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.437027 6131 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.438927 6131 
tlsconfig.go:200] "Loaded serving cert" certName="Generated self signed cert" cer... +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.439276 6131 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopbac... +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.439311 6131 secure_serving.go:210] Serving securely on [::]:10259 +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.439359 6131 tlsconfig.go:240] "Starting DynamicServingCertificateController" +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.540853 6131 leaderelection.go:250] attempting to acquire leader lease kube-system/ku...eduler... +Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.555700 6131 leaderelection.go:260] successfully acquired lease kube-system/kube-scheduler +Hint: Some lines were ellipsized, use -l to show in full. +``` + +```shell +[root@xingdiancloud-native-master-a ~]# kubectl get cs +Warning: v1 ComponentStatus is deprecated in v1.19+ +NAME STATUS MESSAGE ERROR +controller-manager Healthy ok +scheduler Healthy ok +etcd-0 Healthy ok +``` + +#### 9.工作节点(worker node)部署 + +##### 9.1 容器运行时docker部署 + +注意: + +​ 所有worker节点 + +```shell +[root@xingdiancloud-native-node-a ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo + +[root@xingdiancloud-native-node-a ~]# yum -y install docker-ce + +[root@xingdiancloud-native-node-a ~]# systemctl enable --now docker + +[root@xingdiancloud-native-node-a ~]# cat << EOF | sudo tee /etc/docker/daemon.json +{ +"exec-opts": ["native.cgroupdriver=systemd"] +} +EOF + +[root@xingdiancloud-native-node-a ~]# systemctl restart docker +``` + +##### 9.2 cri-dockerd安装 + +注意: + +​ cri-dockerd是docker容器的接口 + +​ 所有worker节点都安装 + +```shell +[root@xingdiancloud-native-node-a ~]# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9-3.el7.x86_64.rpm + +[root@xingdiancloud-native-node-a ~]# 
yum install -y cri-dockerd-0.3.9-3.el7.x86_64.rpm + +[root@xingdiancloud-native-node-a ~]# vi /usr/lib/systemd/system/cri-docker.service +#修改第10行内容,默认启动的pod镜像太低,指定到3.9版本。使用阿里云的镜像仓库,国内下载镜像会比较快 +ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd:// +``` + +![image-20240619104159741](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240619104159741.png) + +```shell +[root@xingdiancloud-native-node-a ~]# systemctl enable --now cri-docker +Created symlink from /etc/systemd/system/multi-user.target.wants/cri-docker.service to /usr/lib/systemd/system/cri-docker.service. + +[root@xingdiancloud-native-node-a ~]# systemctl status cri-docker +● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (/usr/lib/systemd/system/cri-docker.service; enabled; vendor preset: disabled) + Active: active (running) since Fri 2024-01-05 08:29:57 CST; 3s ago + Docs: https://docs.mirantis.com + Main PID: 1821 (cri-dockerd) + Tasks: 7 + Memory: 13.8M + CGroup: /system.slice/cri-docker.service + └─1821 /usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9 --container-runtime-endpoint fd:// + +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Start docker client with request timeout 0s" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Hairpin mode is set to none" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Loaded network plugin cni" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Docker cri networking managed by network plugin cni" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: 
time="2024-01-05T08:29:57+08:00" level=info msg="Setting cgroupDriver systemd" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkC...idr:,},}" +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Starting the GRPC backend for the Docker CRI interface." +Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Start cri-dockerd grpc backend" +Jan 05 08:29:57 k8s-node2 systemd[1]: Started CRI Interface for Docker Application Container Engine. +Hint: Some lines were ellipsized, use -l to show in full. +``` + +在run目录下可以看到cri-dockerd.sock ,这个就是后面kubelet调用docker的sock + +```shell +[root@xingdiancloud-native-node-a run]# ll /run/cri-dockerd.sock +srw-rw---- 1 root docker 0 Jan 5 08:33 /run/cri-dockerd.sock +``` + +##### 9.3 部署kubelet + +注意: + +​ 在xingdiancloud-native-master-a上执行 + +###### 9.3.1 创建kubelet-bootstrap.kubeconfig + +```shell +[root@xingdiancloud-native-master-a k8s-work]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv) + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.9.12.100:6443 --kubeconfig=kubelet-bootstrap.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap + 
+[root@xingdiancloud-native-master-a k8s-work]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig +``` + +###### 9.3.2 创建kubelet配置文件 + +注意: + +​ 所有worker节点操作 + +```shell +[root@xingdiancloud-native-node-a ~]# mkdir -p /etc/kubernetes/ssl + +xingdiancloud-native-node-a 配置文件: +[root@xingdiancloud-native-node-a ~]# cat > /etc/kubernetes/kubelet.json << "EOF" +{ + "kind": "KubeletConfiguration", + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "authentication": { + "x509": { + "clientCAFile": "/etc/kubernetes/ssl/ca.pem" + }, + "webhook": { + "enabled": true, + "cacheTTL": "2m0s" + }, + "anonymous": { + "enabled": false + } + }, + "authorization": { + "mode": "Webhook", + "webhook": { + "cacheAuthorizedTTL": "5m0s", + "cacheUnauthorizedTTL": "30s" + } + }, + "address": "10.9.12.66", + "port": 10250, + "readOnlyPort": 10255, + "cgroupDriver": "systemd", + "hairpinMode": "promiscuous-bridge", + "serializeImagePulls": false, + "clusterDomain": "cluster.local.", + "clusterDNS": ["10.96.0.2"] +} +EOF + +xingdiancloud-native-node-b 配置文件: +[root@xingdiancloud-native-node-b ~]# cat > /etc/kubernetes/kubelet.json << "EOF" +{ + "kind": "KubeletConfiguration", + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "authentication": { + "x509": { + "clientCAFile": "/etc/kubernetes/ssl/ca.pem" + }, + "webhook": { + "enabled": true, + "cacheTTL": "2m0s" + }, + "anonymous": { + "enabled": false + } + }, + "authorization": { + "mode": "Webhook", + "webhook": { + "cacheAuthorizedTTL": "5m0s", + "cacheUnauthorizedTTL": "30s" + } + }, + "address": "10.9.12.65", + "port": 10250, + "readOnlyPort": 10255, + "cgroupDriver": "systemd", + "hairpinMode": "promiscuous-bridge", + "serializeImagePulls": false, + "clusterDomain": "cluster.local.", + "clusterDNS": ["10.96.0.2"] +} +EOF + +xingdiancloud-native-node-c 配置文件: +[root@xingdiancloud-native-node-c ~]# cat > /etc/kubernetes/kubelet.json 
<< "EOF" +{ + "kind": "KubeletConfiguration", + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "authentication": { + "x509": { + "clientCAFile": "/etc/kubernetes/ssl/ca.pem" + }, + "webhook": { + "enabled": true, + "cacheTTL": "2m0s" + }, + "anonymous": { + "enabled": false + } + }, + "authorization": { + "mode": "Webhook", + "webhook": { + "cacheAuthorizedTTL": "5m0s", + "cacheUnauthorizedTTL": "30s" + } + }, + "address": "10.9.12.67", + "port": 10250, + "readOnlyPort": 10255, + "cgroupDriver": "systemd", + "hairpinMode": "promiscuous-bridge", + "serializeImagePulls": false, + "clusterDomain": "cluster.local.", + "clusterDNS": ["10.96.0.2"] +} +EOF +``` + +###### 9.3.3 创建kubelet服务启动管理文件 + +在worker节点上创建kubulet的工作目录,所有worker节点 + +```shell +[root@xingdiancloud-native-node-a ~]# mkdir /var/lib/kubelet +``` + +在worker节点上创建kubulet的配置文件,所有worker节点 + +```shell +[root@xingdiancloud-native-node-a ~]# cat > /usr/lib/systemd/system/kubelet.service << "EOF" +[Unit] +Description=Kubernetes Kubelet +Documentation=https://github.com/kubernetes/kubernetes +After=docker.service +Requires=docker.service + +[Service] +WorkingDirectory=/var/lib/kubelet +ExecStart=/usr/local/bin/kubelet \ + --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \ + --cert-dir=/etc/kubernetes/ssl \ + --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ + --config=/etc/kubernetes/kubelet.json \ + --container-runtime-endpoint=unix:///run/cri-dockerd.sock \ + --rotate-certificates \ + --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF +``` + +###### 9.3.4 同步文件到集群节点 + +xingdiancloud-native-master-a 上生成的kubelet-bootstrap.kubeconfig,ca.pem同步到node节点 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# for i in xingdiancloud-native-node-a xingdiancloud-native-node-b xingdiancloud-native-node-c;do scp kubelet-bootstrap.kubeconfig $i:/etc/kubernetes/;done + 
+[root@xingdiancloud-native-master-a k8s-work]# for i in xingdiancloud-native-node-a xingdiancloud-native-node-b xingdiancloud-native-node-c;do scp ca.pem $i:/etc/kubernetes/ssl;done +``` + +把二进制文件分发到node节点 + +```shell +[root@xingdiancloud-native-master-a bin]# for i in xingdiancloud-native-node-a xingdiancloud-native-node-b xingdiancloud-native-node-c;do scp kubelet kube-proxy $i:/usr/local/bin/;done +``` + +###### 9.3.5 启动服务 + +注意: + +​	所有worker节点 + +``` +[root@xingdiancloud-native-node-a ~]# systemctl daemon-reload +[root@xingdiancloud-native-node-a ~]# systemctl enable --now kubelet + +[root@xingdiancloud-native-node-a ~]# systemctl status kubelet +● kubelet.service - Kubernetes Kubelet + Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled) + Active: active (running) since Fri 2024-01-05 09:21:20 CST; 3min 40s ago + Docs: https://github.com/kubernetes/kubernetes + Main PID: 6177 (kubelet) + CGroup: /system.slice/kubelet.service + └─6177 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig --cert-dir=/etc/kubernetes/ssl --kubeconfig=/etc/kub... 
+ +Jan 05 09:24:11 k8s-node1 kubelet[6177]: E0105 09:24:11.632795 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:16 k8s-node1 kubelet[6177]: E0105 09:24:16.633986 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:21 k8s-node1 kubelet[6177]: E0105 09:24:21.713576 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:26 k8s-node1 kubelet[6177]: E0105 09:24:26.714288 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:31 k8s-node1 kubelet[6177]: E0105 09:24:31.715295 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:36 k8s-node1 kubelet[6177]: E0105 09:24:36.717562 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:41 k8s-node1 kubelet[6177]: E0105 09:24:41.718346 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:46 k8s-node1 kubelet[6177]: E0105 09:24:46.719040 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:51 k8s-node1 kubelet[6177]: E0105 09:24:51.721244 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Jan 05 09:24:56 k8s-node1 kubelet[6177]: E0105 09:24:56.722712 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized" +Hint: Some lines were ellipsized, use -l to show in full. 
+``` + +worker节点都已加入集群,没有在master上安装kubelet,master只作为管理节点所以看不到master节点 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl get nodes +NAME STATUS ROLES AGE VERSION +xingdiancloud-native-node-a NotReady 3m59s v1.28.0 +xingdiancloud-native-node-b NotReady 43s v1.28.0 +xingdiancloud-native-node-c NotReady 43s v1.28.0 +``` + +##### 9.4 部署kube-proxy + +###### 9.4.1 创建kube-proxy证书请求文件 + +注意: + +​ 在xingdiancloud-native-master-a上执行 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# cat > kube-proxy-csr.json << "EOF" +{ + "CN": "system:kube-proxy", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "CN", + "ST": "Beijing", + "L": "Beijing", + "O": "kubemsb", + "OU": "CN" + } + ] +} +EOF +``` + +###### 9.4.2 生成证书 + +注意: + +​ 在xingdiancloud-native-master-a上执行 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy + +[root@xingdiancloud-native-master-a k8s-work]# ls kube-proxy* +kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem +``` + +###### 9.4.3 创建kubeconfig文件 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.9.12.100:6443 --kubeconfig=kube-proxy.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig + +[root@xingdiancloud-native-master-a k8s-work]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig +``` + +###### 9.4.4 创建服务配置文件 + +注意: + +​ 在worker节点上配置 + +```shell +[root@xingdiancloud-native-node-a ~]# cat > 
/etc/kubernetes/kube-proxy.yaml << "EOF" +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 10.9.12.66 +clientConnection: + kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig +clusterCIDR: 10.244.0.0/16 +healthzBindAddress: 10.9.12.66:10256 +kind: KubeProxyConfiguration +metricsBindAddress: 10.9.12.66:10249 +mode: "ipvs" +EOF + +[root@xingdiancloud-native-node-b ~]# cat > /etc/kubernetes/kube-proxy.yaml << "EOF" +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 10.9.12.65 +clientConnection: + kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig +clusterCIDR: 10.244.0.0/16 +healthzBindAddress: 10.9.12.65:10256 +kind: KubeProxyConfiguration +metricsBindAddress: 10.9.12.65:10249 +mode: "ipvs" +EOF + +[root@xingdiancloud-native-node-c ~]# cat > /etc/kubernetes/kube-proxy.yaml << "EOF" +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 10.9.12.67 +clientConnection: + kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig +clusterCIDR: 10.244.0.0/16 +healthzBindAddress: 10.9.12.67:10256 +kind: KubeProxyConfiguration +metricsBindAddress: 10.9.12.67:10249 +mode: "ipvs" +EOF +``` + +###### 9.4.5 创建服务启动管理文件 + +注意: + +​ 在worker节点上配置 + +```shell +[root@xingdiancloud-native-node-a ~]# mkdir -p /var/lib/kube-proxy +``` + +```shell +[root@xingdiancloud-native-node-a ~]# cat > /usr/lib/systemd/system/kube-proxy.service << "EOF" +[Unit] +Description=Kubernetes Kube-Proxy Server +Documentation=https://github.com/kubernetes/kubernetes +After=network.target + +[Service] +WorkingDirectory=/var/lib/kube-proxy +ExecStart=/usr/local/bin/kube-proxy \ + --config=/etc/kubernetes/kube-proxy.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOF +``` + +###### 9.4.6 同步文件到集群工作节点主机 + +注意: + +​ 在xingdiancloud-native-master-a节点上操作 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# ls kube-proxy* +kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.kubeconfig kube-proxy.pem + 
+[root@xingdiancloud-native-master-a k8s-work]# for i in xingdiancloud-native-node-a xingdiancloud-native-node-b xingdiancloud-native-node-c;do scp kube-proxy.kubeconfig $i:/etc/kubernetes/;done + +[root@xingdiancloud-native-master-a k8s-work]# for i in xingdiancloud-native-node-a xingdiancloud-native-node-b xingdiancloud-native-node-c;do scp kube-proxy*pem $i:/etc/kubernetes/ssl; done +``` + +###### 9.4.7 服务启动 + +注意: + +​	所有worker节点 + +```shell +[root@xingdiancloud-native-node-a ~]# systemctl daemon-reload +[root@xingdiancloud-native-node-a ~]# systemctl enable --now kube-proxy + +[root@xingdiancloud-native-node-a ~]# systemctl status kube-proxy +● kube-proxy.service - Kubernetes Kube-Proxy Server + Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled) + Active: active (running) since Fri 2024-01-05 10:53:12 CST; 38s ago + Docs: https://github.com/kubernetes/kubernetes + Main PID: 11727 (kube-proxy) + Tasks: 5 + Memory: 17.1M + CGroup: /system.slice/kube-proxy.service + └─11727 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.yaml --v=2 + +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.551675 11727 shared_informer.go:311] Waiting for caches to sync for endpoint slice config +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.552316 11727 config.go:315] "Starting node config controller" +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.552333 11727 shared_informer.go:311] Waiting for caches to sync for node config +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.569095 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master" +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.569432 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master" +Jan 05 10:53:12 
k8s-node1 kube-proxy[11727]: I0105 10:53:12.652277 11727 shared_informer.go:318] Caches are synced for endpoint slice config +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652318 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master" +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652328 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master" +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652339 11727 shared_informer.go:318] Caches are synced for service config +Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652544 11727 shared_informer.go:318] Caches are synced for node config +Hint: Some lines were ellipsized, use -l to show in full. +``` + +##### 9.5 网络组件部署 Calico + +注意: + +​ 在calico的官网进行下载对应的yaml文件,在我们master节点上创建 + +​ 下载地址:https://docs.tigera.io/calico/latest/about + +​ 选择calico v3.26版本 + +```shell +#把对应命令复制过来,不需要执行 +kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml +kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml +#先使用wget下载后,检查文件正常后在进行部署 +[root@xingdiancloud-native-master-a k8s-work]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml +[root@xingdiancloud-native-master-a k8s-work]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml +[root@xingdiancloud-native-master-a k8s-work]# ll *yaml +-rw-r--r-- 1 root root 824 Jan 5 13:50 custom-resources.yaml +-rw-r--r-- 1 root root 1475581 Jan 5 13:50 tigera-operator.yaml +``` +###### 9.5.1 修改文件 +```shell +#custom-resources.yaml文件默认的pod网络为192.168.0.0/16,我们定义的pod网络为10.244.0.0/16,需要修改后再执行 + cidr: 192.168.0.0/16 修改成 cidr: 10.244.0.0/16 +``` + +注意: + +​ Docker-Hub在中国大陆访问被隔断 + +​ Calico中所有的镜像都需要从Docker-Hub下载 + +​ 执行之前需要事先准备好所需要的镜像,并导入到各个worker节点 + +###### 9.5.2 应用文件 + 
+```shell +#执行tigera-operator.yaml +[root@xingdiancloud-native-master-a k8s-work]# kubectl create -f tigera-operator.yaml +namespace/tigera-operator created +customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created +customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io created +customresourcedefinition.apiextensions.k8s.io/imagesets.operator.tigera.io created 
+customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io created +customresourcedefinition.apiextensions.k8s.io/tigerastatuses.operator.tigera.io created +serviceaccount/tigera-operator created +clusterrole.rbac.authorization.k8s.io/tigera-operator created +clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created +deployment.apps/tigera-operator created + +[root@xingdiancloud-native-master-a k8s-work]# kubectl get ns +NAME STATUS AGE +default Active 23h +kube-node-lease Active 23h +kube-public Active 23h +kube-system Active 23h +tigera-operator Active 18s +[root@xingdiancloud-native-master-a k8s-work]# kubectl get pod -n tigera-operator +NAME READY STATUS RESTARTS AGE +tigera-operator-7f8cd97876-tdjlq 1/1 Running 0 23s +``` + +tigera-operator中pod都running后,执行 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl create -f custom-resources.yaml +installation.operator.tigera.io/default created +apiserver.operator.tigera.io/default created + +[root@xingdiancloud-native-master-a k8s-work]# kubectl get ns +NAME STATUS AGE +calico-system Active 52s +default Active 23h +kube-node-lease Active 23h +kube-public Active 23h +kube-system Active 23h +tigera-operator Active 95s +[root@xingdiancloud-native-master-a k8s-work]# kubectl get pod -n calico-system +NAME READY STATUS RESTARTS AGE +calico-kube-controllers-798969c8c4-dbnct 1/1 Running 0 4m31s +calico-node-742hh 1/1 Running 0 4m32s +calico-node-c5dcj 1/1 Running 0 4m32s +calico-typha-5f789cd78d-zdklj 1/1 Running 0 4m33s +csi-node-driver-86xh4 2/2 Running 0 4m32s +csi-node-driver-b6czj 2/2 Running 0 4m32s +``` + +###### 9.5.3 集群验证 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl get nodes +NAME STATUS ROLES AGE VERSION +xingdiancloud-native-node-a Ready 7d15h v1.28.0 +xingdiancloud-native-node-b Ready 7d15h v1.28.0 +xingdiancloud-native-node-c Ready 7d15h v1.28.0 +``` + +##### 9.6 **部署CoreDNS** + +###### 9.6.1 创建对应yaml文件 + +注意: + +​ 镜像需要事先导入到所有的worker节点 + 
+```shell +[root@xingdiancloud-native-master-a k8s-work]# cat > coredns.yaml << "EOF" +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: + - apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "CoreDNS" +spec: + # replicas: not specified here: + # 1. Default is 1. + # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on. 
+ strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + spec: + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + nodeSelector: + kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: ["kube-dns"] + topologyKey: kubernetes.io/hostname + containers: + - name: coredns + image: coredns/coredns:1.10.1 + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.96.0.2 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + 
protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + +EOF +``` + +###### 9.6.2 执行对应yaml文件创建 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl apply -f coredns.yaml +serviceaccount/coredns created +clusterrole.rbac.authorization.k8s.io/system:coredns created +clusterrolebinding.rbac.authorization.k8s.io/system:coredns created +configmap/coredns created +deployment.apps/coredns created +service/kube-dns created +``` + +###### 9.6.3 查看是否创建成功 + +```shell +[root@xingdiancloud-native-master-a k8s-work]# kubectl get pod -n kube-system -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +coredns-758895f87b-phqsp 1/1 Running 0 98s 10.244.169.131 xingdiancloud-native-node-b +``` + +###### 9.6.4 验证dns域名解析是否正常 + +```shell + +[root@xingdiancloud-native-node-a ~]# dig -t a www.baidu.com @10.96.0.2 + +; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.15 <<>> -t a www.baidu.com @10.96.0.2 +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 54240 +;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 4096 +;; QUESTION SECTION: +;www.baidu.com. IN A + +;; ANSWER SECTION: +www.baidu.com. 12 IN CNAME www.a.shifen.com. +www.a.shifen.com. 12 IN CNAME www.wshifen.com. +www.wshifen.com. 
12 IN A 103.235.46.40 + +;; Query time: 74 msec +;; SERVER: 10.96.0.2#53(10.96.0.2) +;; WHEN: Fri Jan 05 15:48:18 CST 2024 +;; MSG SIZE rcvd: 161 +``` + +## 六:部署应用验证 + +#### 1.创建Nginx应用 + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: xaddt + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-xingdian + labels: + xingdian: nginx + namespace: xaddt + +spec: + replicas: 2 + selector: + matchLabels: + xingdian: nginx + template: + metadata: + labels: + xingdian: nginx + spec: + containers: + - name: nginx-xingdian + image: 10.9.12.201/xingdian/nginx:v1 + ports: + - containerPort: 80 + +--- +apiVersion: v1 +kind: Service +metadata: + name: my-xaddt + namespace: xaddt +spec: + selector: + xingdian: nginx + type: NodePort + ports: + - port: 80 + targetPort: 80 + nodePort: 30011 +``` + +#### 2.执行 + +```shell +[root@xingdiancloud-native-master-a ~]# kubectl create -f nginx-deployment.yaml +namespace/xaddt created +deployment.apps/nginx-xingdian created +service/my-xaddt created +``` + +#### 3.浏览器验证 + +​ 略