2022-11-30
Using Local PVs for pgsql on Kubernetes
Taint nodes 14-16
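The taint command itself is not shown in the original; judging from the `--toleration=node-role.kubernetes.io/pg:NoSchedule` flag used when the cluster is created later, it was presumably something along these lines (node names and key assumed from context):

```bash
# Assumed taint matching the toleration node-role.kubernetes.io/pg:NoSchedule used later
for n in node14 node15 node16; do
  kubectl taint nodes "$n" node-role.kubernetes.io/pg=:NoSchedule
done
```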
Mark nodes 14-16 as unschedulable (cordon)
```
[root@master1 ~]# kubectl cordon node14
node/node14 cordoned
[root@master1 ~]# kubectl cordon node15
node/node15 cordoned
[root@master1 ~]# kubectl cordon node16
node/node16 cordoned
[root@master1 ~]# kubectl get node
NAME      STATUS                     ROLES    AGE    VERSION
master1   Ready                      master   144m   v1.19.8
master2   Ready                      master   143m   v1.19.8
master3   Ready                      master   143m   v1.19.8
node1     Ready                      worker   143m   v1.19.8
node10    Ready                      worker   143m   v1.19.8
node11    Ready                      worker   143m   v1.19.8
node12    Ready                      worker   142m   v1.19.8
node13    Ready                      worker   142m   v1.19.8
node14    Ready,SchedulingDisabled   worker   142m   v1.19.8
node15    Ready,SchedulingDisabled   worker   142m   v1.19.8
node16    Ready,SchedulingDisabled   worker   142m   v1.19.8
node2     Ready                      worker   143m   v1.19.8
node3     Ready                      worker   143m   v1.19.8
node4     Ready                      worker   143m   v1.19.8
node5     Ready                      worker   143m   v1.19.8
node6     Ready                      worker   143m   v1.19.8
node7     Ready                      worker   143m   v1.19.8
node8     Ready                      worker   143m   v1.19.8
node9     Ready                      worker   143m   v1.19.8
```
```
[root@master1 ~]# kubectl drain node15 --delete-local-data --ignore-daemonsets --force
node/node15 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-mcdls, kube-system/kube-proxy-wlngt, kube-system/nodelocaldns-p98d2, kubesphere-logging-system/fluent-bit-2k5hg, kubesphere-monitoring-system/node-exporter-c46tx
evicting pod kubesphere-monitoring-system/prometheus-operator-664b66fd6f-62s8r
evicting pod kubesphere-logging-system/ks-events-operator-7c55bbfc6b-9z5gp
evicting pod kubesphere-monitoring-system/prometheus-k8s-0
pod/prometheus-operator-664b66fd6f-62s8r evicted
pod/prometheus-k8s-0 evicted
pod/ks-events-operator-7c55bbfc6b-9z5gp evicted
node/node15 evicted
[root@master1 ~]# kubectl drain node16 --delete-local-data --ignore-daemonsets --force
node/node16 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-hr2j2, kube-system/kube-proxy-h97qw, kube-system/nodelocaldns-c575c, kubesphere-logging-system/fluent-bit-hnqg6, kubesphere-monitoring-system/node-exporter-hnl2g
evicting pod kubesphere-system/openpitrix-import-job-plnkp
evicting pod kubesphere-logging-system/elasticsearch-logging-data-2
evicting pod kubesphere-monitoring-system/alertmanager-main-1
pod/openpitrix-import-job-plnkp evicted
pod/alertmanager-main-1 evicted
pod/elasticsearch-logging-data-2 evicted
node/node16 evicted
[root@master1 ~]# kubectl drain node14 --delete-local-data --ignore-daemonsets --force
node/node14 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-fshxc, kube-system/kube-proxy-p64rr, kube-system/nodelocaldns-bflft, kubesphere-logging-system/fluent-bit-gzjtr, kubesphere-monitoring-system/node-exporter-xvqbf
evicting pod kubesphere-monitoring-system/thanos-ruler-kubesphere-1
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
evicting pod kubesphere-logging-system/logsidecar-injector-deploy-78cbddd74b-nlkxc
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/thanos-ruler-kubesphere-1 evicted
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/logsidecar-injector-deploy-78cbddd74b-nlkxc evicted
evicting pod istio-system/istio-ingressgateway-6dddcbbfd5-hfpql
error when evicting pod "istio-ingressgateway-6dddcbbfd5-hfpql" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
^C
[root@master1 ~]# kubectl get pod -n istio-system -o wide
NAME                                    READY   STATUS    RESTARTS   AGE    IP             NODE     NOMINATED NODE   READINESS GATES
istio-ingressgateway-6dddcbbfd5-hfpql   1/1     Running   0          145m   10.233.103.3   node14
```
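The drain of node14 kept failing because evicting `istio-ingressgateway` would violate its PodDisruptionBudget, so it was interrupted with `^C` and the pod was left on the node. If you want to see which budget is blocking the eviction (not part of the original steps), something like the following works; the deployment name is assumed from the pod name above:

```bash
# List the disruption budgets in istio-system; the one covering the ingress gateway
# is what rejects the eviction. Scaling the deployment up frees a replica for eviction.
kubectl get pdb -n istio-system
kubectl -n istio-system scale deployment istio-ingressgateway --replicas=2
```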
```
[root@master1 provisioner]# kubectl uncordon node14
node/node14 uncordoned
[root@master1 provisioner]# kubectl uncordon node15
node/node15 uncordoned
[root@master1 provisioner]# kubectl uncordon node16
node/node16 uncordoned
```
Create the StorageClass and local PVs
```
[root@master1 offline_deploy]# cat storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-disks
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
[root@master1 offline_deploy]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/fast-disks created
[root@master1 offline_deploy]# kubectl get sc
NAME                   PROVISIONER                                       RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
fast-disks             kubernetes.io/no-provisioner                      Delete          WaitForFirstConsumer   false                  5s
nfs-client (default)   cluster.local/nfs-client-nfs-client-provisioner   Delete          Immediate              true                   163m
```
Create the mount points. Under the mount directory, create another directory on each node; this is the directory the provisioner will scan.
```
[root@node14 ~]# mkdir -p /pgdata/pv
[root@node15 ~]# mkdir -p /pgdata/pv
[root@node16 ~]# mkdir -p /pgdata/pv
```
Format and mount, i.e. `attach` the physical disk to each node.
Format the filesystem
```bash
mkfs.xfs /dev/mapper/centos-data
```
Mount
```bash
mount -t xfs /dev/mapper/centos-data /pgdata/pv
```
Make the mount persist across reboots (add it to /etc/fstab)
```bash
DISK_UUID=$(sudo blkid -s UUID -o value /dev/mapper/centos-data) && \
echo "UUID=${DISK_UUID} /pgdata/pv xfs defaults 0 0" | sudo tee -a /etc/fstab
```
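A quick way to confirm the fstab entry is correct without rebooting (not in the original steps):

```bash
# Unmount, then remount everything from /etc/fstab and verify the volume comes back
umount /pgdata/pv
mount -a
df -h /pgdata/pv
```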
Check the disk mounts on each node
```
[root@node14 ~]# lsblk
...
└─centos-data   253:3    0  500G  0 lvm  /pgdata/pv
[root@node15 ~]# lsblk
...
└─centos-data   253:3    0  500G  0 lvm  /pgdata/pv
[root@node16 ~]# lsblk
...
└─centos-data   253:3    0  500G  0 lvm  /pgdata/pv
```
Install local-static-provisioner
```bash
git clone https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner.git
cd sig-storage-local-static-provisioner
```

Edit the following settings (the numbers are line positions in helm/provisioner/values.yaml):

```
vi helm/provisioner/values.yaml
 66 classes:
 67 - name: fast-disks
 70   hostDir: /pgdata
 76   volumeMode: Filesystem
 81   fsType: xfs
116   image: quay.io/external_storage/local-volume-provisioner:v2.4.0
```
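The command that actually deploys the provisioner is omitted above. Given the `localprovi-provisioner-*` DaemonSet pods that show up in kube-system below, it was presumably rendered from the Helm chart and applied roughly like this (release name and exact flags are assumptions):

```bash
# Render the chart with the edited values.yaml and apply the generated manifests
helm template localprovi ./helm/provisioner --namespace kube-system > localprovi.yaml
kubectl apply -f localprovi.yaml
```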
```
[root@bots-hrx-ksm1 provisioner]# kubectl get pv
NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
local-pv-913a7ee8   499Gi      RWO            Delete           Available           fast-disks              4s
local-pv-f21e5ebe   499Gi      RWO            Delete           Available           fast-disks              3s
local-pv-f3b5c31f   499Gi      RWO            Delete           Available           fast-disks              5s
```
```
[root@bots-hrx-ksm1 provisioner]# kubectl get pod -n kube-system
kube-scheduler-bots-hrx-ksm1   1/1   Running   2   32m
kube-scheduler-bots-hrx-ksm2   1/1   Running   0   31m
kube-scheduler-bots-hrx-ksm3   1/1   Running   1   31m
localprovi-provisioner-5qtbl   1/1   Running   0   3m
localprovi-provisioner-8nlxr   1/1   Running   0   3m1s
localprovi-provisioner-bwgq9   1/1   Running   0   3m1s
localprovi-provisioner-db6q5   1/1   Running   0   3m
localprovi-provisioner-drgfl   1/1   Running   0   3m
localprovi-provisioner-gcmbg   1/1   Running   0   3m1s
localprovi-provisioner-kv29z   1/1   Running   0   3m
localprovi-provisioner-mb6ss   1/1   Running   0   3m1s
localprovi-provisioner-mg8mk   1/1   Running   0   3m
localprovi-provisioner-mqzws   1/1   Running   0   3m1s
localprovi-provisioner-q7jnv   1/1   Running   0   3m1s
localprovi-provisioner-qmrdh   1/1   Running   0   3m
localprovi-provisioner-t7lfp   1/1   Running   0   3m
localprovi-provisioner-w8lcq   1/1   Running   0   3m1s
localprovi-provisioner-zz9dg   1/1   Running   0   3m
```
```
[root@bots-hrx-ksw14 ~]# df -h
Filesystem                Size  Used  Avail  Use%  Mounted on
/dev/mapper/centos-data   500G   33M   500G    1%  /pgdata/pv
```
Set node affinity; label the nodes first.
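The label command itself is not shown; based on the `--node-label=node-role.kubernetes.io/pg=` flag used below, it was presumably something like this (node names assumed from the earlier steps):

```bash
# Label the dedicated PG nodes with an empty-valued node-role key
for n in node14 node15 node16; do
  kubectl label nodes "$n" node-role.kubernetes.io/pg=
done
```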
Deploy the Operator
Modify the image registry address and the namespace
```
[root@bots-hrx-ksm1 kubectl]# pwd
/root/offline_deploy/radondb-postgresql-operator/installers/kubectl
[root@bots-hrx-ksm1 kubectl]# vim postgres-operator.yml
```

The settings changed in postgres-operator.yml:

```yaml
pgo_client_install: "true"
pgo_client_container_install: "true"
pgo_image_prefix: "198.1.229.203/docker.io/radondb"
```

```yaml
- name: pgo-deploy
  image: 198.1.229.203/docker.io/radondb/pgo-deployer:centos8-4.7.1
  imagePullPolicy: Always
```

```yaml
storage5_name: "storageos"
storage5_access_mode: "ReadWriteOnce"
storage5_size: "5Gi"
storage5_type: "dynamic"
storage5_class: "fast-disks"
```

Apply the manifest, then create the cluster with the pgo client:

```
kubectl apply -f postgres-operator.yml
```

```
pgo create cluster pgcluster \
  --toleration=node-role.kubernetes.io/pg:NoSchedule \
  --replica-count=2 \
  --cpu=4 --cpu-limit=4 \
  --memory=16Gi --memory-limit=16Gi \
  --pgbackrest-cpu=4 --pgbackrest-cpu-limit=4 \
  --pgbackrest-memory=1Gi --pgbackrest-memory-limit=1Gi \
  --node-label=node-role.kubernetes.io/pg= \
  --node-affinity-type=required \
  --storage-config=storageos \
  --metrics \
  --pgbouncer --pgbouncer-replicas=2 \
  --ccp-image-tag='centos8-12.7-4.7.1' \
  --replica-storage-config=storageos \
  --ccp-image-prefix='198.1.229.203/docker.io/radondb' \
  -n bots-hrx-pgo
```

```
[root@bots-hrx-ksm2 ~]# kubectl get pod -n bots-hrx-pgo -o wide
NAME                              READY   STATUS      RESTARTS   AGE   IP             NODE             NOMINATED NODE   READINESS GATES
backrest-backup-pgcluster-8pj9w   0/1     Completed   0          16m   10.233.84.15   bots-hrx-ksw15
```
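Once the pods are up, the pgo client can be used to check the new cluster; this verification step is not part of the original text:

```bash
# Show the cluster layout and test connectivity to the primary and replicas
pgo show cluster pgcluster -n bots-hrx-pgo
pgo test pgcluster -n bots-hrx-pgo
```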
Finally, adjust the remaining components so that everything is co-located on the dedicated PG nodes by adding the following affinity and toleration to their pod specs:
```yaml
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: node-role.kubernetes.io/pg
            operator: In
            values:
            - ''
  tolerations:
  - key: node-role.kubernetes.io/pg
    operator: Exists
```
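A minimal way to apply this snippet to one of the remaining workloads, assuming the Deployment name below is a placeholder and wrapping the snippet under `spec.template.spec`:

```bash
# Merge-patch a Deployment's pod template with the affinity and toleration above
kubectl -n bots-hrx-pgo patch deployment pgcluster-backrest-shared-repo --type merge -p '
spec:
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/pg
                operator: In
                values:
                - ""
      tolerations:
      - key: node-role.kubernetes.io/pg
        operator: Exists
'
```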