Start the NFS service on the NFS host 192.168.88.213

mkdir -p /application/consul_data

vim /etc/exports
/application/consul_data *(rw,no_root_squash,sync)

exportfs -r
systemctl restart rpcbind && systemctl restart nfs
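Before moving on, you can confirm the export is visible from a client with showmount (a quick sanity check; requires the NFS client utilities to be installed):

```sh
# List the exports published by the NFS server
showmount -e 192.168.88.213
# Expected: /application/consul_data *
```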

Create the RBAC resources

cat > nfs-provisioner-rbac.yaml <<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

EOF
kubectl apply -f nfs-provisioner-rbac.yaml
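Optionally verify that the three RBAC objects exist:

```sh
kubectl get serviceaccount nfs-client-provisioner
kubectl get clusterrole nfs-client-provisioner-runner
kubectl get clusterrolebinding run-nfs-client-provisioner
```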

Configure a Deployment for the provisioner

cat > nfs-provisioner-deploy.yaml <<EOF
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-client-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: myapps.runtime/nfs
            - name: NFS_SERVER
              value: 192.168.88.213
            - name: NFS_PATH
              value: /application/consul_data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.88.213
            path: /application/consul_data

EOF

kubectl apply -f nfs-provisioner-deploy.yaml
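Check that the provisioner pod is running; its logs will show provisioning activity once PVCs are created:

```sh
kubectl get pods -l app=nfs-client-provisioner
# Follow the provisioner logs (useful when a PVC stays Pending)
kubectl logs -f deployment/nfs-client-provisioner
```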

Create the StorageClass

cat > nfs-storageclass.yaml <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
provisioner: myapps.runtime/nfs # must match the PROVISIONER_NAME env var in the nfs-client-provisioner Deployment

EOF
kubectl apply -f nfs-storageclass.yaml
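Verify the class, and optionally mark it as the cluster default so PVCs without an explicit storageClassName also use it:

```sh
kubectl get storageclass nfs-storage
# Optional: make nfs-storage the default StorageClass
kubectl patch storageclass nfs-storage \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```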

Test the dynamic StorageClass by creating a PVC

cat > test-pvc1.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc001
  namespace: default
  labels: # these labels are optional
    name: nfs-pvc001
    storetype: nfs
    capacity: 500Mi
spec:
  storageClassName: nfs-storage
  accessModes:  # the PVC also declares access modes; they must equal, or be a subset of, an existing PV's modes, otherwise no PV can be matched
  - ReadWriteMany
  resources: # a PV must satisfy these resource requests to be matched to this PVC
    requests:
      storage: 500Mi  # how much space is requested


EOF

kubectl create -f test-pvc1.yaml
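Before deleting it, confirm the claim was bound; the provisioner should have created a matching PV and a directory under /application/consul_data on the NFS server:

```sh
kubectl get pvc nfs-pvc001   # STATUS should be Bound
kubectl get pv               # shows the dynamically created volume
```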
kubectl delete -f test-pvc1.yaml

Deploy Consul on Kubernetes

mkdir /consul
cd /consul
git clone https://github.com/hashicorp/consul-helm.git

  • Edit values.yaml

vim consul-helm/values.yaml

  • Enable the service by changing

    enabled: false

    to

    enabled: true

  • Comment out the ClusterIP service type

    # type: ClusterIP

    and switch to NodePort:

    type: NodePort
    nodePort: 40213

  • Change the ui service type to

    type: NodePort

  • Set storageClass to the class created earlier (the default size of 10Gi does not need to be changed):

    storageClass: nfs-storage

# Deploy Consul

helm install hashicorp ./consul-helm
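Check the release and wait for the Consul pods to become Ready (the app=consul label is the chart's default; adjust it if you changed the chart's labels):

```sh
helm status hashicorp
kubectl get pods -l app=consul -o wide
```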


# Uninstall

helm uninstall hashicorp
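Note that helm uninstall leaves behind the PVCs created by the server StatefulSet; delete them explicitly if you also want the data gone (the label selector below assumes the chart's default app=consul label):

```sh
kubectl get pvc
kubectl delete pvc -l app=consul   # assumption: the chart labels its PVCs with app=consul
```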


# Consul backup

vim backup.sh

```sh
#!/bin/sh

DATE=$(date +%Y%m%d%H%M)

export CONSUL_HTTP_ADDR="192.168.3.21:28501"
export CONSUL_HTTP_TOKEN="9f012e18-1a62-4a0a-4d8e-f6ef9f45c689"

echo $CONSUL_HTTP_ADDR
echo $CONSUL_HTTP_TOKEN

consul kv export > /root/xiaochen-toolkit/scripts/consul/backup_data/kv.dump.$DATE
consul snapshot save /root/xiaochen-toolkit/scripts/consul/backup_data/backup.snap.$DATE
```
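Consul ships matching restore commands for both backup formats; a minimal restore sketch, assuming the same CONSUL_HTTP_ADDR/CONSUL_HTTP_TOKEN environment and one timestamp from the files produced above (the DATE value is illustrative):

```sh
#!/bin/sh
DATE=202401010200   # illustrative: the timestamp suffix of the backup to restore

# Re-import the KV tree exported by `consul kv export`
consul kv import @/root/xiaochen-toolkit/scripts/consul/backup_data/kv.dump.$DATE

# Restore the full snapshot (replaces the cluster's current state)
consul snapshot restore /root/xiaochen-toolkit/scripts/consul/backup_data/backup.snap.$DATE
```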
