# Integrating GlusterFS with Kubernetes
# What is GlusterFS?
GlusterFS is an open-source distributed file system with strong scale-out capability. It can support several PB of storage capacity and thousands of clients, interconnecting storage nodes over the network into a single parallel network file system, and offers scalability, high performance, and high availability.
Common resources:

- pool: the storage resource pool
- peer: a storage node
- volume: a volume; it must be in the Started state before it can be used
- brick: a storage unit (a disk); bricks can be added and removed
- gluster: the command-line management tool

When adding nodes, GlusterFS treats the local machine as localhost by default, so you only need to probe the other machines; every node acts as a master.
GlusterFS listens on port 49152 by default (this is the port used by the brick processes).
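For reference, these resources map onto gluster CLI subcommands that appear throughout this walkthrough; a few of the most common ones (run on any node once glusterd is up):

```bash
gluster pool list       # show the trusted storage pool
gluster peer status     # show the peers known to this node
gluster volume list     # list all volumes
gluster volume status   # show brick status, including the brick ports (49152 and up)
```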
# Installing GlusterFS
I am testing directly on a local Kubernetes cluster: a three-node Kubernetes cluster is already set up, but no backend storage has been configured yet.
Hostname | IP address | Environment |
---|---|---|
master | 192.168.0.128 | CentOS 7.6, two additional 20 GB disks |
node1 | 192.168.0.37 | CentOS 7.6, two additional 20 GB disks |
node2 | 192.168.0.112 | CentOS 7.6, two additional 20 GB disks |
// Run on all nodes
[root@master ~]# yum install centos-release-gluster -y
[root@master ~]# yum install glusterfs-server -y
[root@master ~]# systemctl start glusterd.service
[root@master ~]# systemctl enable glusterd.service
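Before continuing, it may be worth confirming that the daemon is actually running on every node; for example:

```bash
[root@master ~]# systemctl status glusterd.service
[root@master ~]# gluster --version
```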
# Create two directories to stand in for the added disks
[root@master ~]# mkdir -p /test1
[root@master ~]# mkdir -p /test2
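In a real deployment you would normally format the added disks and mount them as dedicated brick directories instead of using directories on the root filesystem. A minimal sketch, assuming the two extra disks show up as /dev/sdb and /dev/sdc (hypothetical device names, check yours with lsblk):

```bash
# Format the added disks with XFS
mkfs.xfs /dev/sdb
mkfs.xfs /dev/sdc

# Mount them as brick directories
mkdir -p /data/brick1 /data/brick2
mount /dev/sdb /data/brick1
mount /dev/sdc /data/brick2

# Persist the mounts across reboots
echo '/dev/sdb /data/brick1 xfs defaults 0 0' >> /etc/fstab
echo '/dev/sdc /data/brick2 xfs defaults 0 0' >> /etc/fstab
```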
# Adding nodes to the GlusterFS storage pool
// Run on the master node
# Check the current gluster storage pool
[root@master ~]# gluster pool list
UUID Hostname State
bc6a2d93-6cca-4b4d-a414-2a19e45a46f3 localhost Connected
# Add the other nodes to the gluster storage pool
[root@master ~]# gluster peer probe node1
peer probe: success
[root@master ~]# gluster peer probe node2
peer probe: success
[root@master ~]# gluster pool list
UUID Hostname State
2cb0d9ef-57c4-40ac-9805-f363209e2bf0 node1 Connected
60c6c251-6d1e-44aa-8192-e8278a1fc274 node2 Connected
bc6a2d93-6cca-4b4d-a414-2a19e45a46f3 localhost Connected
# GlusterFS volume management
A distributed-replicated volume lets you set the replication count. If replica is set to 2, every uploaded file is stored as 2 copies, so uploading 10 files actually stores 20 files, which provides a degree of redundancy; the copies are distributed across the nodes. (A quick way to observe this is shown after the volume is mounted below.)
# Creating a distributed-replicated volume
The most commonly used volume type is the distributed-replicated volume; the force parameter forces the volume to be created even though fewer than 3 replicas are used.
[root@master ~]# gluster volume create k8s replica 2 master:/test1 master:/test2 node1:/test1 node1:/test2 force
# Start the volume
[root@master ~]# gluster volume start k8s
volume start: k8s: success
# View the volume
[root@master ~]# gluster volume info k8s
Volume Name: k8s
Type: Distributed-Replicate
Volume ID: d47aad4d-eca7-474f-9c97-8776689a31ad
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: master:/test1
Brick2: master:/test2
Brick3: node1:/test1
Brick4: node1:/test2
Options Reconfigured:
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
# Mounting the volume
[root@master ~]# mount -t glusterfs 192.168.0.128:/k8s /mnt
[root@master ~]# df -h | grep mnt
192.168.0.128:/k8s 40G 4.6G 34G 12% /mnt
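To see the replica-2 behaviour described earlier, you can write a file through the mount point and then look at the underlying bricks; a quick check along these lines (using the brick paths from this setup):

```bash
# Write a test file through the GlusterFS mount
echo test > /mnt/file1.txt

# The file should appear on both bricks of one replica pair;
# which pair it lands on depends on the filename hash
ls /test1 /test2             # on master
ssh node1 ls /test1 /test2   # on node1
```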
# Expanding the distributed-replicated volume
# Check capacity before expansion
[root@master ~]# df -h | grep mnt
192.168.0.128:/k8s 40G 4.6G 34G 12% /mnt
# Expansion command
[root@master ~]# gluster volume add-brick k8s node2:/test1 node2:/test2 force
volume add-brick: success
# Check capacity after expansion
[root@master ~]# df -h | grep mnt
192.168.0.128:/k8s 59G 6.6G 50G 12% /mnt
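After adding bricks, it is usually a good idea to rebalance the volume so that existing data is spread across the new bricks as well; for example:

```bash
[root@master ~]# gluster volume rebalance k8s start
[root@master ~]# gluster volume rebalance k8s status
```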
# Using GlusterFS as backend storage for Kubernetes
# Creating the Endpoints object
$ vim glusterfs-ep.yaml
```yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs
  namespace: default
subsets:
  - addresses:
      - ip: 192.168.0.128
      - ip: 192.168.0.37
      - ip: 192.168.0.112
    ports:
      - port: 49152
        protocol: TCP
```
[root@master glusterfs_py]# kubectl apply -f glusterfs-ep.yaml
endpoints/glusterfs created
[root@master glusterfs_py]# kubectl get endpoints
NAME ENDPOINTS AGE
glusterfs 192.168.0.128:49152,192.168.0.37:49152,192.168.0.112:49152 12s
kubernetes 192.168.0.128:6443
# Creating the Service
The Service must have the same name as the Endpoints object (glusterfs here) so that Kubernetes associates the two.
[root@master glusterfs_py]# cat glusterfs-service.yaml
```yaml
apiVersion: v1
kind: Service
metadata:
  name: glusterfs
spec:
  ports:
    - port: 49152
      protocol: TCP
      targetPort: 49152
  type: ClusterIP
```
[root@master glusterfs_py]# kubectl apply -f glusterfs-service.yaml
service/glusterfs created
[root@master glusterfs_py]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
glusterfs ClusterIP 10.106.51.139 <none> 49152/TCP 4s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 85m
# Creating a GlusterFS PV to verify
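A minimal sketch of what a GlusterFS-backed PV could look like for this setup, assuming a hypothetical file name glusterfs-pv.yaml, the Endpoints name glusterfs created above, the GlusterFS volume k8s, and a 50Gi capacity to match the PVC in the next step:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: glusterfs   # must match the Endpoints object created above
    path: k8s              # the GlusterFS volume name
    readOnly: false
```

Applying it with kubectl apply -f glusterfs-pv.yaml should leave the PV in the Available state until the PVC below binds to it.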
# Using a PVC
[root@master glusterfs_py]# cat pvc.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 50Gi
```
[root@master glusterfs_py]# kubectl apply -f pvc.yaml
persistentvolumeclaim/glusterfs created
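To confirm the claim has bound to the PV, check its status; something along these lines:

```bash
[root@master glusterfs_py]# kubectl get pvc glusterfs
# STATUS should show Bound once the claim has matched the 50Gi GlusterFS PV
```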
# Creating a Pod that uses the GlusterFS storage
[root@master glusterfs_py]# cat pod.yaml
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
    - image: nginx
      name: nginx
      ports:
        - containerPort: 80
          hostPort: 80          # expose port 80 on the host node
      volumeMounts:
        - name: glusterfs
          mountPath: /usr/share/nginx/html
  volumes:
    - name: glusterfs
      persistentVolumeClaim:
        claimName: glusterfs
```
[root@master glusterfs_py]# kubectl apply -f pod.yaml
pod/nginx created
[root@master glusterfs_py]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 109s
[root@master glusterfs_py]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 2m51s 10.244.166.129 node1 <none> <none>
[root@master glusterfs_py]# curl 10.244.166.129
Hello Word
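The Hello Word response implies that an index.html with that content had already been written to the GlusterFS volume, for example through the /mnt mount created earlier (an assumed step that is not shown above):

```bash
# Write the page into the GlusterFS volume via the mount on master
echo "Hello Word" > /mnt/index.html
```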