kubernetes 使用NFS製作動態PV/PVC
參考:https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client/deploy
安裝NFS
yum -y install nfs-utils rpcbind
mkdir -p /data/pvdata
chown -R nfsnobody.nfsnobody /data/pvdata
echo "/data/pvdata 192.168.18.0/24(rw,async,all_squash)" >> /etc/exports
systemctl enable rpcbind.service
systemctl enable nfs-server.service
systemctl start rpcbind.service     # 埠是 111
systemctl start nfs-server.service  # 埠是 2049

# exportfs -v
/data/pvdata    192.168.18.0/24(async,wdelay,hide,no_subtree_check,sec=sys,rw,secure,root_squash,all_squash)
新建storageclass.yaml
# cat storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: baiyongjie.com/nfs  # 這裡自定義,和deployment檔案中的PROVISIONER_NAME必須一致
parameters:
  archiveOnDelete: "false"

# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/managed-nfs-storage created
新建deployment,部署nfs-client
# cat deployment.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: baiyongjie.com/nfs  # 這裡和storageclass.yaml檔案中的provisioner要一致
            - name: NFS_SERVER
              value: 192.168.18.251  # NFS_SERVER的地址
            - name: NFS_PATH
              value: /data/pvdata  # NFS_SERVER共享的目錄
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.18.251  # NFS_SERVER的地址
            path: /data/pvdata  # NFS_SERVER共享的目錄

# kubectl apply -f deployment.yaml
serviceaccount/nfs-client-provisioner created
deployment.extensions/nfs-client-provisioner created
新建rbac
# cat rbac.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

# kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner unchanged
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
建立測試PVC的Pod
此Pod用的是官方的配置檔案,啟動後會在pvc目錄下建立一個SUCCESS檔案,建立後Pod會退出
# cat test-claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"  # 這裡和storageclass.yaml檔案中的name要一致
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: busybox
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

# kubectl apply -f test-claim.yaml
persistentvolumeclaim/test-claim created
pod/test-pod created

# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                STORAGECLASS          REASON   AGE
pvc-324e43e9-5205-11e9-864f-8cec4b5f0fe5   1Mi        RWX            Delete           Bound    default/test-claim   managed-nfs-storage            12s

# kubectl get pvc
NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
test-claim   Bound    pvc-324e43e9-5205-11e9-864f-8cec4b5f0fe5   1Mi        RWX            managed-nfs-storage   17s

# kubectl get pods
NAME                                     READY   STATUS      RESTARTS   AGE
nfs-client-provisioner-db87b59d4-js2j7   1/1     Running     0          94s
test-pod                                 0/1     Completed   0          51s  # 執行完成後就已經退出了

// 檢視資料是否被保留,可以看到SUCCESS檔案還是存在的
# tree /data/pvdata/
/data/pvdata/
└── default-test-claim-pvc-324e43e9-5205-11e9-864f-8cec4b5f0fe5
    └── busybox
        └── SUCCESS
建立測試PVC的Nginx-pod
建立一個一直執行的Pod觀察資料情況,這裡以nginx的log日誌為例
# cat test-nginx-pod.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-nginx-claim  # 申請一個名為test-nginx-claim的PVC
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany  # 允許多讀寫
  resources:
    requests:
      storage: 100Mi  # PVC大小為100MB
---
kind: Pod
apiVersion: v1
metadata:
  name: test-nginx-pod
spec:
  containers:
    - name: test-nginx-pod
      image: nginx
      ports:
        - containerPort: 80
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/var/log/nginx"
          subPath: "nginx/logs"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-nginx-claim  # 掛載上面申請的PVC

# kubectl apply -f test-nginx-pod.yaml
persistentvolumeclaim/test-nginx-claim created
pod/test-nginx-pod created

# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                      STORAGECLASS          REASON   AGE
pvc-6422cf52-5206-11e9-864f-8cec4b5f0fe5   100Mi      RWX            Delete           Bound    default/test-nginx-claim   managed-nfs-storage            15s

# kubectl get pvc
NAME               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
test-nginx-claim   Bound    pvc-6422cf52-5206-11e9-864f-8cec4b5f0fe5   100Mi      RWX            managed-nfs-storage   8m2s

# kubectl get pods -o wide
NAME                                     READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nfs-client-provisioner-db87b59d4-js2j7   1/1     Running   0          10m   10.244.0.46   master   <none>           <none>
test-nginx-pod                           1/1     Running   0          65s   10.244.0.47   master   <none>           <none>

# tree /data/pvdata/default-test-nginx-claim-pvc-6422cf52-5206-11e9-864f-8cec4b5f0fe5
/data/pvdata/default-test-nginx-claim-pvc-6422cf52-5206-11e9-864f-8cec4b5f0fe5
└── nginx
    └── logs
        ├── access.log
        └── error.log

// 使用for迴圈請求十次,然後檢視日誌
# for ((i=1;i<=10;i=i+1));do curl -sI 10.244.0.47 > /dev/null;done

// 可以看到剛好記錄十次日誌
# cat /data/pvdata/default-test-nginx-claim-pvc-6422cf52-5206-11e9-864f-8cec4b5f0fe5/nginx/logs/access.log
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
10.244.0.1 - - [29/Mar/2019:09:42:38 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"