diff --git a/kubernetes-MD/基于Kubernetes构建ES集群.md b/kubernetes-MD/基于Kubernetes构建ES集群.md
index 0c99ca7..7a91dce 100644
--- a/kubernetes-MD/基于Kubernetes构建ES集群.md
+++ b/kubernetes-MD/基于Kubernetes构建ES集群.md
@@ -1,314 +1,314 @@
-
Building an ES Cluster on a Kubernetes Cluster
-
-Author: 行癫 (unauthorized reproduction will be pursued)
-
-------
-
-## I: Environment Preparation
-
-#### 1. Kubernetes Cluster Environment
-
-| Node              | Address     |
-| :---------------: | :---------: |
-| Kubernetes-Master | 10.9.12.206 |
-| Kubernetes-Node-1 | 10.9.12.205 |
-| Kubernetes-Node-2 | 10.9.12.204 |
-| Kubernetes-Node-3 | 10.9.12.203 |
-| DNS Server        | 10.9.12.210 |
-| Proxy Server      | 10.9.12.209 |
-| NFS Storage       | 10.9.12.250 |
-
-#### 2. Kuboard Cluster Management
-
-![image-20240420164922730](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420164922730.png)
-
-## II: Building the ES Cluster
-
-#### 1. Persistent Storage Setup
-
-1. NFS server deployment
-
-   Omitted.
-
-2. Create the shared directory
-
-   The directory is created with a script this time; the script is as follows.
-
-```shell
-[root@xingdiancloud-1 ~]# cat nfs.sh
-#!/bin/bash
-# Create an NFS shared directory and register it in /etc/exports
-read -p "Enter the shared directory to create: " dir
-if [ -d "$dir" ];then
-    # The directory already exists, so ask for a different one
-    echo "Directory already exists, please enter a different shared directory: "
-    read again_dir
-    mkdir -p "$again_dir"
-    echo "Shared directory created"
-    read -p "Enter the clients allowed to mount it: " ips
-    echo "$again_dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
-    xingdian=$(grep -c "$again_dir" /etc/exports)
-    if [ "$xingdian" -eq 1 ];then
-        echo "Export configured successfully"
-        exportfs -rv >/dev/null
-        exit
-    else
-        exit
-    fi
-else
-    mkdir -p "$dir"
-    echo "Shared directory created"
-    read -p "Enter the clients allowed to mount it: " ips
-    echo "$dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
-    xingdian=$(grep -c "$dir" /etc/exports)
-    if [ "$xingdian" -eq 1 ];then
-        echo "Export configured successfully"
-        exportfs -rv >/dev/null
-        exit
-    else
-        exit
-    fi
-fi
-```
-
-3. Create the StorageClass
-
-```yaml
-[root@xingdiancloud-master ~]# vim namespace.yaml
-apiVersion: v1
-kind: Namespace
-metadata:
- name: logging
-[root@xingdiancloud-master ~]# vim storageclass.yaml
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- annotations:
- k8s.kuboard.cn/storageNamespace: logging
- k8s.kuboard.cn/storageType: nfs_client_provisioner
- name: data-es
-parameters:
- archiveOnDelete: 'false'
-provisioner: nfs-data-es
-reclaimPolicy: Retain
-volumeBindingMode: Immediate
-```
-
-4. Create the PersistentVolume
-
-```yaml
-[root@xingdiancloud-master ~]# vim persistenVolume.yaml
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- annotations:
- pv.kubernetes.io/bound-by-controller: 'yes'
- finalizers:
- - kubernetes.io/pv-protection
- name: nfs-pv-data-es
-spec:
- accessModes:
- - ReadWriteMany
- capacity:
- storage: 100Gi
- claimRef:
- apiVersion: v1
- kind: PersistentVolumeClaim
- name: nfs-pvc-data-es
- namespace: kube-system
- nfs:
- path: /data/es-data
- server: 10.9.12.250
- persistentVolumeReclaimPolicy: Retain
- storageClassName: nfs-storageclass-provisioner
- volumeMode: Filesystem
-```
-
-Note: the StorageClass and the PersistentVolume can also be created through the Kuboard UI.
-
-#### 2. Set Node Labels
-
-```shell
-[root@xingdiancloud-master ~]# kubectl label nodes xingdiancloud-node-1 es=log
-```
-
-Note:
-
-   Every node that will run ES must be given this label.
-
-   The label is what the StatefulSet below uses to schedule the ES pods.
-
-#### 3. Deploy the ES Cluster
-
-   Note: each node of the ES cluster needs a unique, stable network identity plus persistent storage. A Deployment cannot provide this, since it only manages stateless workloads, so the cluster is deployed with a StatefulSet.
-
-```yaml
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: es
- namespace: logging
-spec:
- serviceName: elasticsearch
- replicas: 3
- selector:
- matchLabels:
- app: elasticsearch
- template:
- metadata:
- labels:
- app: elasticsearch
- spec:
- nodeSelector:
- es: log
- initContainers:
- - name: increase-vm-max-map
- image: busybox
- command: ["sysctl", "-w", "vm.max_map_count=262144"]
- securityContext:
- privileged: true
- - name: increase-fd-ulimit
- image: busybox
- command: ["sh", "-c", "ulimit -n 65536"]
- securityContext:
- privileged: true
- containers:
- - name: elasticsearch
- image: 10.9.12.201/xingdian/es:7.6.2
- ports:
- - name: rest
- containerPort: 9200
- - name: inter
- containerPort: 9300
- resources:
- limits:
- cpu: 500m
- memory: 4000Mi
- requests:
- cpu: 500m
- memory: 3000Mi
- volumeMounts:
- - name: data
- mountPath: /usr/share/elasticsearch/data
- env:
- - name: cluster.name
- value: k8s-logs
- - name: node.name
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: cluster.initial_master_nodes
- value: "es-0,es-1,es-2"
- - name: discovery.zen.minimum_master_nodes
- value: "2"
- - name: discovery.seed_hosts
- value: "elasticsearch"
-        - name: ES_JAVA_OPTS
- value: "-Xms512m -Xmx512m"
- - name: network.host
- value: "0.0.0.0"
- - name: node.max_local_storage_nodes
- value: "3"
- volumeClaimTemplates:
- - metadata:
- name: data
- labels:
- app: elasticsearch
- spec:
- accessModes: [ "ReadWriteMany" ]
- storageClassName: data-es
- resources:
- requests:
- storage: 25Gi
-```
-
-#### 4. Create a Service to Expose the ES Cluster
-
-```yaml
-[root@xingdiancloud-master ~]# vim elasticsearch-svc.yaml
-kind: Service
-apiVersion: v1
-metadata:
- name: elasticsearch
- namespace: logging
- labels:
- app: elasticsearch
-spec:
- selector:
- app: elasticsearch
- type: NodePort
- ports:
- - port: 9200
- targetPort: 9200
- nodePort: 30010
- name: rest
- - port: 9300
- name: inter-node
-```
-
-#### 5. Access Test
-
-Note:
-
-   The cluster is accessed with the elasticvue browser plugin.
-
-   The cluster status is healthy.
-
-   All cluster nodes are up.
-
-![image-20240420172247845](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420172247845.png)
-
-## III: Proxy and DNS Configuration
-
-#### 1. Proxy Configuration
-
-Note:
-
-   Proxy deployment is omitted.
-
-   Nginx is used as the proxy here.
-
-   Access control is user based; create the user and password yourself with htpasswd.
-
-   The configuration file is as follows.
-
-```shell
-[root@proxy ~]# cat /etc/nginx/conf.d/elasticsearch.conf
-server {
-        listen 80;
-        server_name es.xingdian.com;
-        location / {
-          auth_basic "xingdiancloud kibana";
-          auth_basic_user_file /etc/nginx/pass;
-          # Upstream: a Kubernetes node address plus the NodePort (30010 in the Service above)
-          proxy_pass http://<node-address>:<node-port>;
-        }
-}
-```
-
-#### 2. DNS Configuration
-
-Note:
-
-   DNS server deployment is omitted.
-
-   The zone configuration is as follows.
-
-```shell
-[root@www ~]# cat /var/named/xingdian.com.zone
-$TTL 1D
-@ IN SOA @ rname.invalid. (
- 0 ; serial
- 1D ; refresh
- 1H ; retry
- 1W ; expire
- 3H ) ; minimum
- NS @
-        A       <DNS-server-address>
-es      A       <proxy-server-address>
- AAAA ::1
-```
-
-#### 3. Access Test
-
+Building an ES Cluster on a Kubernetes Cluster
+
+Author: 行癫 (unauthorized reproduction will be pursued)
+
+------
+
+## I: Environment Preparation
+
+#### 1. Kubernetes Cluster Environment
+
+| Node              | Address     |
+| :---------------: | :---------: |
+| Kubernetes-Master | 10.9.12.206 |
+| Kubernetes-Node-1 | 10.9.12.205 |
+| Kubernetes-Node-2 | 10.9.12.204 |
+| Kubernetes-Node-3 | 10.9.12.203 |
+| DNS Server        | 10.9.12.210 |
+| Proxy Server      | 10.9.12.209 |
+| NFS Storage       | 10.9.12.250 |
+
+#### 2. Kuboard Cluster Management
+
+![image-20240420164922730](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420164922730.png)
+
+## II: Building the ES Cluster
+
+#### 1. Persistent Storage Setup
+
+1. NFS server deployment
+
+   Omitted.
+
+2. Create the shared directory
+
+   The directory is created with a script this time; the script is as follows.
+
+```shell
+[root@xingdiancloud-1 ~]# cat nfs.sh
+#!/bin/bash
+# Create an NFS shared directory and register it in /etc/exports
+read -p "Enter the shared directory to create: " dir
+if [ -d "$dir" ];then
+    # The directory already exists, so ask for a different one
+    echo "Directory already exists, please enter a different shared directory: "
+    read again_dir
+    mkdir -p "$again_dir"
+    echo "Shared directory created"
+    read -p "Enter the clients allowed to mount it: " ips
+    echo "$again_dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
+    xingdian=$(grep -c "$again_dir" /etc/exports)
+    if [ "$xingdian" -eq 1 ];then
+        echo "Export configured successfully"
+        exportfs -rv >/dev/null
+        exit
+    else
+        exit
+    fi
+else
+    mkdir -p "$dir"
+    echo "Shared directory created"
+    read -p "Enter the clients allowed to mount it: " ips
+    echo "$dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
+    xingdian=$(grep -c "$dir" /etc/exports)
+    if [ "$xingdian" -eq 1 ];then
+        echo "Export configured successfully"
+        exportfs -rv >/dev/null
+        exit
+    else
+        exit
+    fi
+fi
+```
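+
+The export can be checked before wiring it into Kubernetes. A minimal verification sketch, assuming the script was used to share /data/es-data (the path the PersistentVolume below points at):
+
+```shell
+# On the NFS server: list the directories currently exported
+[root@xingdiancloud-1 ~]# exportfs -v
+# From any Kubernetes node: ask 10.9.12.250 what it exports (requires nfs-utils)
+[root@xingdiancloud-master ~]# showmount -e 10.9.12.250
+```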
+
+3. Create the StorageClass
+
+```yaml
+[root@xingdiancloud-master ~]# vim namespace.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: logging
+[root@xingdiancloud-master ~]# vim storageclass.yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ annotations:
+ k8s.kuboard.cn/storageNamespace: logging
+ k8s.kuboard.cn/storageType: nfs_client_provisioner
+ name: data-es
+parameters:
+ archiveOnDelete: 'false'
+provisioner: nfs-data-es
+reclaimPolicy: Retain
+volumeBindingMode: Immediate
+```
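+
+A small sketch of applying and checking the two manifests above. Note that the StorageClass only names a provisioner (nfs-data-es); an nfs-client provisioner registered under that name is assumed to be running already, for example one deployed through Kuboard:
+
+```shell
+[root@xingdiancloud-master ~]# kubectl apply -f namespace.yaml
+[root@xingdiancloud-master ~]# kubectl apply -f storageclass.yaml
+# Confirm the namespace and the StorageClass exist
+[root@xingdiancloud-master ~]# kubectl get ns logging
+[root@xingdiancloud-master ~]# kubectl get storageclass data-es
+```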
+
+4. Create the PersistentVolume
+
+```yaml
+[root@xingdiancloud-master ~]# vim persistenVolume.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ annotations:
+ pv.kubernetes.io/bound-by-controller: 'yes'
+ finalizers:
+ - kubernetes.io/pv-protection
+ name: nfs-pv-data-es
+spec:
+ accessModes:
+ - ReadWriteMany
+ capacity:
+ storage: 100Gi
+ claimRef:
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ name: nfs-pvc-data-es
+ namespace: kube-system
+ nfs:
+ path: /data/es-data
+ server: 10.9.12.250
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: nfs-storageclass-provisioner
+ volumeMode: Filesystem
+```
+
+Note: the StorageClass and the PersistentVolume can also be created through the Kuboard UI.
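+
+To apply and verify the PersistentVolume defined above (a minimal check):
+
+```shell
+[root@xingdiancloud-master ~]# kubectl apply -f persistenVolume.yaml
+# The PV should be listed as Available, or Bound once a matching PVC exists
+[root@xingdiancloud-master ~]# kubectl get pv nfs-pv-data-es
+```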
+
+#### 2. Set Node Labels
+
+```shell
+[root@xingdiancloud-master ~]# kubectl label nodes xingdiancloud-node-1 es=log
+```
+
+Note:
+
+   Every node that will run ES must be given this label (see the verification example below).
+
+   The label is what the StatefulSet below uses to schedule the ES pods.
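+
+For example, the remaining worker nodes can be labelled the same way and the result checked. The node names below are assumptions based on the table in Part I; adjust them to whatever kubectl get nodes actually shows:
+
+```shell
+# Hypothetical node names; label every node that should run ES
+[root@xingdiancloud-master ~]# kubectl label nodes xingdiancloud-node-2 es=log
+[root@xingdiancloud-master ~]# kubectl label nodes xingdiancloud-node-3 es=log
+# Show the es label as a column for all nodes
+[root@xingdiancloud-master ~]# kubectl get nodes -L es
+```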
+
+#### 3. Deploy the ES Cluster
+
+   Note: each node of the ES cluster needs a unique, stable network identity plus persistent storage. A Deployment cannot provide this, since it only manages stateless workloads, so the cluster is deployed with a StatefulSet.
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: es
+ namespace: logging
+spec:
+ serviceName: elasticsearch
+ replicas: 3
+ selector:
+ matchLabels:
+ app: elasticsearch
+ template:
+ metadata:
+ labels:
+ app: elasticsearch
+ spec:
+ nodeSelector:
+ es: log
+ initContainers:
+ - name: increase-vm-max-map
+ image: busybox
+ command: ["sysctl", "-w", "vm.max_map_count=262144"]
+ securityContext:
+ privileged: true
+ - name: increase-fd-ulimit
+ image: busybox
+ command: ["sh", "-c", "ulimit -n 65536"]
+ securityContext:
+ privileged: true
+ containers:
+ - name: elasticsearch
+ image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
+ ports:
+ - name: rest
+ containerPort: 9200
+ - name: inter
+ containerPort: 9300
+ resources:
+ limits:
+ cpu: 500m
+ memory: 4000Mi
+ requests:
+ cpu: 500m
+ memory: 3000Mi
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/elasticsearch/data
+ env:
+ - name: cluster.name
+ value: k8s-logs
+ - name: node.name
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: cluster.initial_master_nodes
+ value: "es-0,es-1,es-2"
+ - name: discovery.zen.minimum_master_nodes
+ value: "2"
+ - name: discovery.seed_hosts
+ value: "elasticsearch"
+        - name: ES_JAVA_OPTS
+ value: "-Xms512m -Xmx512m"
+ - name: network.host
+ value: "0.0.0.0"
+ - name: node.max_local_storage_nodes
+ value: "3"
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ app: elasticsearch
+ spec:
+ accessModes: [ "ReadWriteMany" ]
+ storageClassName: data-es
+ resources:
+ requests:
+ storage: 25Gi
+```
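+
+Assuming the manifest above is saved as es-statefulset.yaml (the file name is not fixed by this document), it can be applied and the rollout watched as follows:
+
+```shell
+[root@xingdiancloud-master ~]# kubectl apply -f es-statefulset.yaml
+# StatefulSet pods come up one at a time: es-0, es-1, es-2
+[root@xingdiancloud-master ~]# kubectl get pods -n logging -l app=elasticsearch -w
+# Each replica should get a PVC bound through the data-es StorageClass
+[root@xingdiancloud-master ~]# kubectl get pvc -n logging
+```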
+
+#### 4. Create a Service to Expose the ES Cluster
+
+```yaml
+[root@xingdiancloud-master ~]# vim elasticsearch-svc.yaml
+kind: Service
+apiVersion: v1
+metadata:
+ name: elasticsearch
+ namespace: logging
+ labels:
+ app: elasticsearch
+spec:
+ selector:
+ app: elasticsearch
+ type: NodePort
+ ports:
+ - port: 9200
+ targetPort: 9200
+ nodePort: 30010
+ name: rest
+ - port: 9300
+ name: inter-node
+```
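+
+Apply the Service and confirm that port 9200 is published on NodePort 30010:
+
+```shell
+[root@xingdiancloud-master ~]# kubectl apply -f elasticsearch-svc.yaml
+[root@xingdiancloud-master ~]# kubectl get svc -n logging elasticsearch
+# List the pod endpoints behind the Service
+[root@xingdiancloud-master ~]# kubectl get endpoints -n logging elasticsearch
+```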
+
+#### 5. Access Test
+
+Note:
+
+   The cluster is accessed with the elasticvue browser plugin.
+
+   The cluster status is healthy.
+
+   All cluster nodes are up.
+
+![image-20240420172247845](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420172247845.png)
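+
+Besides elasticvue, the cluster can be checked from the command line through the NodePort. Any node address works; 10.9.12.205 (Kubernetes-Node-1 from the table in Part I) is used here as an example:
+
+```shell
+# Cluster health should report "status" : "green" with 3 nodes
+[root@xingdiancloud-master ~]# curl http://10.9.12.205:30010/_cluster/health?pretty
+# List the individual cluster members
+[root@xingdiancloud-master ~]# curl http://10.9.12.205:30010/_cat/nodes?v
+```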
+
+## III: Proxy and DNS Configuration
+
+#### 1. Proxy Configuration
+
+Note:
+
+   Proxy deployment is omitted.
+
+   Nginx is used as the proxy here.
+
+   Access control is user based; create the user and password yourself with htpasswd (see the example after the configuration).
+
+   The configuration file is as follows.
+
+```shell
+[root@proxy ~]# cat /etc/nginx/conf.d/elasticsearch.conf
+server {
+        listen 80;
+        server_name es.xingdian.com;
+        location / {
+          auth_basic "xingdiancloud kibana";
+          auth_basic_user_file /etc/nginx/pass;
+          # Upstream: a Kubernetes node address plus the NodePort (30010 in the Service above)
+          proxy_pass http://<node-address>:<node-port>;
+        }
+}
+```
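+
+The password file referenced by auth_basic_user_file can be created with htpasswd (from the httpd-tools package on CentOS/RHEL-style systems). The user name xingdian below is only an example:
+
+```shell
+[root@proxy ~]# yum install -y httpd-tools
+# -c creates /etc/nginx/pass; omit -c when adding further users
+[root@proxy ~]# htpasswd -c /etc/nginx/pass xingdian
+# Validate and reload the Nginx configuration
+[root@proxy ~]# nginx -t && systemctl reload nginx
+```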
+
+#### 2. DNS Configuration
+
+Note:
+
+   DNS server deployment is omitted.
+
+   The zone configuration is as follows.
+
+```shell
+[root@www ~]# cat /var/named/xingdian.com.zone
+$TTL 1D
+@ IN SOA @ rname.invalid. (
+ 0 ; serial
+ 1D ; refresh
+ 1H ; retry
+ 1W ; expire
+ 3H ) ; minimum
+ NS @
+        A       <DNS-server-address>
+es      A       <proxy-server-address>
+ AAAA ::1
+```
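+
+Assuming the zone is served by BIND (named), reload it and test the record against the DNS server at 10.9.12.210 from the table in Part I; the credentials for the final curl come from the htpasswd file created earlier:
+
+```shell
+[root@www ~]# systemctl restart named
+# Query the es record directly against the DNS server
+[root@www ~]# dig es.xingdian.com @10.9.12.210 +short
+# Access ES through the proxy using its domain name and basic auth
+[root@www ~]# curl -u <user>:<password> http://es.xingdian.com/_cluster/health?pretty
+```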
+
+#### 3. Access Test
+
   Omitted.
\ No newline at end of file