Volume存储卷

  • Volume将容器中的指定数据和容器解耦,并将数据存储到指定的位置,不同的存储卷功能不一致,如果是基于网络存储的存储卷可以实现容器间的数据共享和持久化。
  • 静态存储卷需要在使用前手动创建pv和pvc,然后绑定至pod使用
  • 常用的几种卷:
    • emptyDir:本地临时卷
    • hostPath:本地存储卷
    • configmap:配置文件
    • Secret:是一种包含少量敏感信息例如密码、令牌或密钥的对象
    • nfs等网络存储卷

https://kubernetes.io/zh/docs/concepts/storage/volumes/

一、emptyDir

emptyDir:本地临时卷,pod删除卷也跟着删除,通常用于两个pod之间日志共享

  • 当pod被分配给节点时,首先创建emptydir卷,并且只要该Pod在该节点上运行,该卷就会存在,正如卷的名字所述,它最初是空的,Pod中的容器可以读取和写入emptyDir卷中的相同文件,尽管该卷可以挂载到每个容器中的相同或不同路径上,当出于任何原因从节点中删除pod时,emptyDir中的数据将被永久删除。
  • /var/lib/kubelet/pods/$ID/volumes/kubernetes.io~empty-dir/cache-volume/$FILE
[root@haproxy1 case5-emptyDir]# 
# Deployment demonstrating an emptyDir volume: node-local scratch space that is
# created when the Pod is scheduled onto a node and deleted together with the Pod.
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: # selector used by the ReplicaSet/Deployment; must match the Pod template labels below
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        # mount the named volume (declared under .spec.template.spec.volumes) into the container
        - mountPath: /cache
          name: cache-volume
      volumes:
      # volume name referenced by volumeMounts above
      - name: cache-volume
      # volume type: emptyDir = empty node-local directory, removed when the Pod is deleted
        emptyDir: {}
#挂载完成查看pod所调度到哪个节点上
[root@haproxy1 case5-emptyDir]# kubectl get pods  -o wide 
NAME                                      READY   STATUS              RESTARTS      AGE     IP               NODE            NOMINATED NODE   READINESS GATES

nginx-deployment-cf796fbd-hx4mv           1/1     Running             0             3d1h    10.200.169.144   172.16.92.141   <none>           <none>
#查看上面已经将pod调度到172.16.92.141上,可以到该节点查看是否已经存在cache-volume这个卷
[root@k8s-node2 ~]# find /var/lib/ -name 'cache-volume'
/var/lib/kubelet/pods/43b5be70-9c9e-47ce-b4ce-9a98b65de774/volumes/kubernetes.io~empty-dir/cache-volume
/var/lib/kubelet/pods/43b5be70-9c9e-47ce-b4ce-9a98b65de774/plugins/kubernetes.io~empty-dir/cache-volume

#测试——————————可以进入pod的/cache 挂载目录,创建文件或文件夹,然后再到141节点上查看是否已经创建成功
[root@haproxy1 case5-emptyDir]# kubectl exec -it nginx
nginx                            nginx-deployment-cf796fbd-hx4mv  
[root@haproxy1 case5-emptyDir]# kubectl exec -it nginx-deployment-cf796fbd-hx4mv  bash 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-cf796fbd-hx4mv:/# 
root@nginx-deployment-cf796fbd-hx4mv:/# cd cache/
root@nginx-deployment-cf796fbd-hx4mv:/cache# mkdir nihaoa 
root@nginx-deployment-cf796fbd-hx4mv:/cache# ls 
nihaoa
#回到141节点查看是否已经创建了nihaoa的文件夹
[root@k8s-node2 cache-volume]# cd /var/lib/kubelet/pods/43b5be70-9c9e-47ce-b4ce-9a98b65de774/volumes/kubernetes.io~empty-dir/cache-volume
[root@k8s-node2 cache-volume]# ll
总用量 0
drwxr-xr-x 2 root root 6 5月18 11:10 nihaoa
[root@k8s-node2 cache-volume]# 

#一旦将pod删除,宿主机的目录也会被删除
[root@haproxy1 case5-emptyDir]# kubectl delete -f  deploy_emptyDir.yml 
deployment.apps "nginx-deployment" deleted
[root@k8s-node2 ~]# find /var/lib/ -name 'cache-volume'
二、hostPath

hostpath卷将主机节点上文件系统中的文件或目录挂载到集群中,pod删除的时候卷不会被删除

root@nginx-deployment-cf796fbd-hx4mv:/cache# exit 
# Deployment demonstrating a hostPath volume: mounts a directory from the node's
# filesystem into the container; the data survives Pod deletion (stays on the node).
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        # mount the hostPath volume at /cache inside the container
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          # directory on the node; NOTE(review): no `type:` is set, so existence is not
          # checked — consider `type: DirectoryOrCreate` to have kubelet create it
          path: /tmp/linux66

1

三、nfs等共享存储(常用v3版本,v4可能会有问题)

nfs卷允许将现有的NFS(网络文件系统)共享挂载到容器中,不像emptyDir,当删除Pod时,nfs卷的内容会被保留,卷仅仅是被卸载,这意味着nfs卷可以预先填充数据,并且这些数据可以在pod之间共享。NFS可以被多个写入者同时挂载。

需要安装nfs服务端:

# Install the NFS server packages and start the services.
# NOTE: `nfs-utils` is a *package* name, not a systemd unit — the server unit is
# `nfs-server` (plus `rpcbind`); `systemctl start nfs-utils` would fail.
yum install -y nfs-utils rpcbind
systemctl enable --now rpcbind nfs-server
systemctl status rpcbind nfs-server
[root@haproxy1 case7-nfs]# cat /etc/exports
#注意,这里用*只是测试,生产环境需要填写服务器的网段,如172.16.0.0/16,不能写pod网段,要写宿主机网段
/data/k8sdata *(rw,no_root_squash)
#检查配置文件是否正常
[root@haproxy1 case7-nfs]# exportfs  -r
#如果showmount遇到以下报错,关闭服务端防火墙
clnt_create: RPC: Port mapper failure - Unable to receive: errno 113 (No route to host)
#到客户端挂载试试
[root@k8s-master1 ~]# showmount -e 172.16.92.160
Export list for 172.16.92.160:
/data/k8sdata 172.16.92.0/24
[root@k8s-master1 ~]# mount -t nfs 172.16.92.160:/data/k8sdata /application
[root@k8s-master1 ~]# 
[root@k8s-master1 ~]# df -h 
文件系统                     容量  已用  可用 已用% 挂载点
/dev/sda5                     70G  5.0G   66G    8% /
devtmpfs                     2.8G     0  2.8G    0% /dev
tmpfs                        2.8G     0  2.8G    0% /dev/shm
tmpfs                        2.8G  292M  2.5G   11% /run
tmpfs                        2.8G     0  2.8G    0% /sys/fs/cgroup
/dev/sda2                     20G   33M   20G    1% /home
/dev/sda1                    100G  1.1G   99G    2% /var
tmpfs                        5.3G   12K  5.3G    1% /var/lib/kubelet/pods/d9b2f0b8-d5b7-4949-a687-468281be5f83/volumes/kubernetes.io~secret/etcd-certs
tmpfs                        5.3G   12K  5.3G    1% /var/lib/kubelet/pods/d9b2f0b8-d5b7-4949-a687-468281be5f83/volumes/kubernetes.io~projected/kube-api-access-nj7r9
overlay                      100G  1.1G   99G    2% /var/lib/docker/overlay2/75d1ac39be42a35a7e8f18d51f089d51004141fec406f6caea4a15eeeeffa569/merged
shm                           64M     0   64M    0% /var/lib/docker/containers/89234494f4af49c5bb9fbcf0a7446817d92a71863ab7f079a2790b2b623d3170/mounts/shm
overlay                      100G  1.1G   99G    2% /var/lib/docker/overlay2/4755c360c33f2f974117ae6ad11318aafe3a32719db05bf5f6de2b75204fe405/merged
tmpfs                        565M     0  565M    0% /run/user/0
172.16.92.160:/data/k8sdata   70G  8.1G   62G   12% /application

  • 创建多个pod测试挂载同一个nfs
[root@haproxy1 case7-nfs]# cat deploy_nfs.yml 
# Deployment + NodePort Service demonstrating an NFS volume.
# Unlike emptyDir, the NFS share outlives the Pod: on Pod deletion the volume is
# only unmounted, so the data persists and can be shared between Pods.
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        # mount point inside the container (subdirectory of the nginx docroot)
        - mountPath: /usr/share/nginx/html/mysite
        # which volume (from .spec.template.spec.volumes) to mount here
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          # NFS server address — fixed to match this environment's server
          # (172.16.92.160 exports /data/k8sdata, see /etc/exports and the df output)
          server: 172.16.92.160
          # exported path on the NFS server
          path: /data/k8sdata

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81          # ClusterIP (in-cluster) port
    targetPort: 80    # container port the Service forwards to
    nodePort: 30016   # external port opened on every node (default range 30000-32767)
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80

#进入pod查看

[root@haproxy1 case7-nfs]# kubectl exec -it nginx-deployment-57cbf4c5f4-z6xqd bash 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-57cbf4c5f4-z6xqd:/# cd /usr/share/nginx/html/mysite
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# 
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# ls 
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# 
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# mkdir nihao 
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# df -h 
Filesystem                   Size  Used Avail Use% Mounted on
overlay                      100G  1.8G   99G   2% /
tmpfs                         64M     0   64M   0% /dev
tmpfs                        2.8G     0  2.8G   0% /sys/fs/cgroup
/dev/sda1                    100G  1.8G   99G   2% /etc/hosts
shm                           64M     0   64M   0% /dev/shm
172.16.92.160:/data/k8sdata   70G  8.1G   62G  12% /usr/share/nginx/html/mysite
tmpfs                        5.3G   12K  5.3G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                        2.8G     0  2.8G   0% /proc/acpi
tmpfs                        2.8G     0  2.8G   0% /proc/scsi
tmpfs                        2.8G     0  2.8G   0% /sys/firmware
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# 
root@nginx-deployment-57cbf4c5f4-z6xqd:/usr/share/nginx/html/mysite# ls 
a  hahahhaha  nihao
#可以查看nfs服务端已经有了
[root@haproxy1 case7-nfs]# ll /data/k8sdata/
总用量 0
-rw-r--r-- 1 root root 0 5月18 12:35 a
-rw-r--r-- 1 root root 0 5月18 12:35 hahahhaha
drwxr-xr-x 2 root root 6 5月18 12:35 nihao

#测试——————————创建一个图片到pod内的nginx站点目录中看看是否能访问

volume2.png

volume3.png

挂载流程:

kubelet先在宿主机上创建挂载点并将其挂载到nfs服务器的导出目录,然后通过联合文件系统(overlay)把该目录映射到容器内的目标路径使用;容器本身没有独立内核,无法直接加载nfs客户端驱动,所以挂载动作必须由宿主机完成

  • 创建多个pod测试每个pod挂载多个nfs
[root@haproxy1 case7-nfs]# kubectl exec -it nginx-deployment-site2-76f4fdb9f5-sz6k7 bash 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-site2-76f4fdb9f5-sz6k7:/# ls 
bin  boot  dev  docker-entrypoint.d  docker-entrypoint.sh  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  srv  sys  tmp  usr  var
root@nginx-deployment-site2-76f4fdb9f5-sz6k7:/# df -h 
Filesystem                   Size  Used Avail Use% Mounted on
overlay                      100G  1.8G   99G   2% /
tmpfs                         64M     0   64M   0% /dev
tmpfs                        2.8G     0  2.8G   0% /sys/fs/cgroup
/dev/sda1                    100G  1.8G   99G   2% /etc/hosts
shm                           64M     0   64M   0% /dev/shm
tmpfs                        5.3G   12K  5.3G   1% /run/secrets/kubernetes.io/serviceaccount
172.16.92.160:/data/k8sdata   70G  8.1G   62G  12% /usr/share/nginx/html/mysite
172.16.92.161:/data/dujie     70G  1.1G   69G   2% /usr/share/nginx/html/magedu
tmpfs                        2.8G     0  2.8G   0% /proc/acpi
tmpfs                        2.8G     0  2.8G   0% /proc/scsi
tmpfs                        2.8G     0  2.8G   0% /sys/firmware