Installing and Deploying GlusterFS Distributed Storage

Preparing the Environment

| Host     | IP         | Role            |
|----------|------------|-----------------|
| backup   | 10.0.0.41  | GlusterFS node  |
| docker01 | 10.0.0.101 | GlusterFS node  |
| docker02 | 10.0.0.102 | GlusterFS node  |
| harbor   | 10.0.0.99  | Client node     |

Deploying GlusterFS

## Install GlusterFS (all nodes)
yum -y install centos-release-gluster
yum -y install glusterfs-server

## Start the service and enable it at boot
systemctl start glusterd
systemctl enable glusterd

## Sync the clock on every node (the cluster peers should agree on time)
yum -y install ntpdate
ntpdate cn.ntp.org.cn
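
## Optional: ntpdate is a one-shot sync, so clocks will drift again over time.
## A minimal sketch for periodic resync via cron (the five-minute schedule and
## the NTP server are illustrative assumptions, not from the original):
echo "*/5 * * * * /usr/sbin/ntpdate cn.ntp.org.cn >/dev/null 2>&1" >> /var/spool/cron/root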

## Inspect the existing disks and partitions
[root@backup ~]# fdisk -l
Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000c155d

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     1026047      512000   83  Linux
/dev/sda2         1026048     3123199     1048576   82  Linux swap / Solaris
/dev/sda3         3123200    41943039    19409920   83  Linux
## Add a new disk to every GlusterFS node (docker01, docker02, backup)
- Power off the VM
- Add a new virtual disk


## Verify that the new disk (/dev/sdb) is visible
[root@docker01 ~]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000c155d

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     1026047      512000   83  Linux
/dev/sda2         1026048     3123199     1048576   82  Linux swap / Solaris
/dev/sda3         3123200    41943039    19409920   83  Linux

Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

## Partition the new disk (docker01, docker02, backup)
fdisk /dev/sdb 

Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x6a5b5325.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1): 
First sector (2048-41943039, default 2048): 
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-41943039, default 41943039): 
Using default value 41943039
Partition 1 of type Linux and of size 20 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
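
## The session above just accepts every default. If you prefer to script it,
## the same answers can be piped in; a sketch, assuming an empty /dev/sdb
## (double-check the target device before scripting destructive disk operations):
## n = new partition, p = primary, 1 = partition number,
## two blank lines = default first/last sector, w = write
printf "n\np\n1\n\n\nw\n" | fdisk /dev/sdb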

## Format the partition (docker01, docker02, backup)
mkfs.ext4 /dev/sdb1

## Create the mount point, mount, and verify (docker01, docker02, backup); the fstab entry below makes the mount permanent
mkdir /sdb1
mount /dev/sdb1 /sdb1
[root@backup ~]# df -Th
Filesystem     Type      Size  Used Avail Use% Mounted on
devtmpfs       devtmpfs  476M     0  476M   0% /dev
tmpfs          tmpfs     487M     0  487M   0% /dev/shm
tmpfs          tmpfs     487M  7.7M  479M   2% /run
tmpfs          tmpfs     487M     0  487M   0% /sys/fs/cgroup
/dev/sda3      xfs        19G  1.7G   17G  10% /
/dev/sda1      xfs       497M  125M  373M  26% /boot
tmpfs          tmpfs      98M     0   98M   0% /run/user/0
/dev/sdb1      ext4       20G   45M   19G   1% /sdb1
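
## A bare mount does not survive a reboot. To make it permanent, mirror the
## /etc/fstab pattern used for /sdc1 later in this guide (run on all three nodes):
echo "/dev/sdb1 /sdb1 ext4 defaults 0 0" >> /etc/fstab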

## Start glusterfsd and add /etc/hosts entries on every host
[root@docker01 ~]# systemctl start glusterfsd
[root@docker02 ~]# systemctl start glusterfsd
[root@backup ~]# systemctl start glusterfsd

[root@harbor ~]# echo "10.0.0.101 gfs-node1" >> /etc/hosts
[root@harbor ~]# echo "10.0.0.102 gfs-node2" >> /etc/hosts
[root@harbor ~]# echo "10.0.0.41 gfs-node3" >> /etc/hosts

[root@backup ~]# echo "10.0.0.101 gfs-node1" >> /etc/hosts
[root@backup ~]# echo "10.0.0.102 gfs-node2" >> /etc/hosts
[root@backup ~]# echo "10.0.0.41 gfs-node3" >> /etc/hosts

[root@docker01 ~]# echo "10.0.0.101 gfs-node1" >> /etc/hosts
[root@docker01 ~]# echo "10.0.0.102 gfs-node2" >> /etc/hosts
[root@docker01 ~]# echo "10.0.0.41 gfs-node3" >> /etc/hosts

[root@docker02 ~]# echo "10.0.0.101 gfs-node1" >> /etc/hosts
[root@docker02 ~]# echo "10.0.0.102 gfs-node2" >> /etc/hosts
[root@docker02 ~]# echo "10.0.0.41 gfs-node3" >> /etc/hosts

## From any one node, probe the other two peers (run on gfs-node1)
[root@docker01 ~]# gluster peer probe gfs-node1 
peer probe: Probe on localhost not needed
[root@docker01 ~]# gluster peer probe gfs-node2
peer probe: success
[root@docker01 ~]# gluster peer probe gfs-node3
peer probe: success

[root@docker01 ~]# netstat -antp|grep gluster
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      853/glusterd        
tcp        0      0 10.0.0.101:49151        10.0.0.102:24007        ESTABLISHED 853/glusterd        
tcp        0      0 10.0.0.101:24007        10.0.0.41:49151         ESTABLISHED 853/glusterd        
tcp        0      0 10.0.0.101:49150        10.0.0.41:24007         ESTABLISHED 853/glusterd        
tcp        0      0 10.0.0.101:24007        10.0.0.102:49151        ESTABLISHED 853/glusterd 

## Check the cluster status; any node should now list the other two peers
[root@docker01 ~]# gluster peer status 
Number of Peers: 2

Hostname: gfs-node2
Uuid: 34c6133f-a4f4-4c20-b154-0bed6acd4acb
State: Peer in Cluster (Connected)

Hostname: gfs-node3
Uuid: 49c04608-fcee-42ef-b399-819abb5cd657
State: Peer in Cluster (Connected)

[root@docker02 ~]# gluster peer status 
Number of Peers: 2

Hostname: gfs-node1
Uuid: 7097bd00-c28d-4b2b-acae-4611c4541c12
State: Peer in Cluster (Connected)

Hostname: gfs-node3
Uuid: 49c04608-fcee-42ef-b399-819abb5cd657
State: Peer in Cluster (Connected)

[root@backup ~]# gluster peer status 
Number of Peers: 2

Hostname: gfs-node1
Uuid: 7097bd00-c28d-4b2b-acae-4611c4541c12
State: Peer in Cluster (Connected)

Hostname: gfs-node2
Uuid: 34c6133f-a4f4-4c20-b154-0bed6acd4acb
State: Peer in Cluster (Connected)
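
## peer status omits the local node; gluster pool list shows all three members,
## including localhost, in one table (output shape may vary by GlusterFS version):
gluster pool list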

Creating a Distributed Volume

A distributed volume places each whole file on exactly one brick, chosen by hashing the file name: capacity adds up across bricks, but there is no redundancy.

## Create the distributed volume (force is required because each brick sits at the root of its mount point)
[root@docker01 ~]# gluster volume create distribute-volume gfs-node1:/sdb1 gfs-node2:/sdb1 gfs-node3:/sdb1 force
volume create: distribute-volume: success: please start the volume to access data

[root@docker01 ~]# gluster volume list 
distribute-volume

[root@docker01 ~]# gluster volume info distribute-volume 

Volume Name: distribute-volume
Type: Distribute             # note the type: a distributed volume
Volume ID: 54e6c08c-1cd6-4599-a9f0-be86d37ed80c
Status: Created
Snapshot Count: 0
Number of Bricks: 3
Transport-type: tcp
Bricks:                      # three brick storage servers in total
Brick1: gfs-node1:/sdb1
Brick2: gfs-node2:/sdb1
Brick3: gfs-node3:/sdb1
Options Reconfigured:
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on

## A newly created volume must be started before it can be used; start the distributed volume
[root@docker01 ~]# gluster volume start distribute-volume
volume start: distribute-volume: success

[root@docker01 ~]# gluster volume status
Status of volume: distribute-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick gfs-node1:/sdb1                       49152     0          Y       8314 
Brick gfs-node2:/sdb1                       49152     0          Y       3162 
Brick gfs-node3:/sdb1                       49152     0          Y       2550 

Task Status of Volume distribute-volume
------------------------------------------------------------------------------
There are no active volume tasks

## Install the client packages
[root@harbor ~]# yum -y install glusterfs glusterfs-fuse

## Mount the volume on the client
[root@harbor ~]# mkdir -p /data/distribute-volume
[root@harbor ~]# mount.glusterfs gfs-node1:distribute-volume /data/distribute-volume
[root@harbor ~]# df -Th
gfs-node1:distribute-volume fuse.glusterfs   59G  737M   56G   2% /data/distribute-volume
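
## Mounting through gfs-node1 alone makes that node a single point of failure
## when the mount is established. Recent glusterfs-fuse versions accept fallback
## volfile servers; a sketch (check your version's mount.glusterfs man page for
## the exact option name):
mount -t glusterfs -o backup-volfile-servers=gfs-node2:gfs-node3 gfs-node1:distribute-volume /data/distribute-volume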

## Generate some test files under /data
[root@harbor ~]# cd /data/
[root@harbor data]# dd if=/dev/zero of=test1.log bs=1M count=10
10+0 records in
10+0 records out
10485760 bytes (10 MB) copied, 0.00499322 s, 2.1 GB/s

[root@harbor data]# dd if=/dev/zero of=test2.log bs=1M count=10
10+0 records in
10+0 records out
10485760 bytes (10 MB) copied, 0.00368149 s, 2.8 GB/s

[root@harbor data]# dd if=/dev/zero of=test3.log bs=1M count=10
10+0 records in
10+0 records out
10485760 bytes (10 MB) copied, 0.00541283 s, 1.9 GB/s

## Move the test files into distribute-volume and check where they land; on a distributed volume, each file is stored whole on a single node
[root@harbor data]# mv test1.log distribute-volume/
[root@harbor data]# mv test2.log distribute-volume/
[root@harbor data]# mv test3.log distribute-volume/

[root@docker01 ~]# ll /sdb1/
total 10260
drwx------ 2 root root    16384 Oct  9 18:39 lost+found
-rw-r--r-- 2 root root 10485760 Oct  9 19:01 test3.log
[root@docker02 ~]# ll /sdb1/
total 20504
drwx------ 2 root root    16384 Oct  9 18:39 lost+found
-rw-r--r-- 2 root root 10485760 Oct  9 19:00 test1.log
-rw-r--r-- 2 root root 10485760 Oct  9 19:00 test2.log
[root@backup ~]# ll /sdb1/
total 16
drwx------ 2 root root 16384 Oct  9 18:39 lost+found
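
## You can also ask the client which brick holds a given file, without logging
## into each node: GlusterFS FUSE mounts expose this via a virtual extended
## attribute (assumes getfattr from the attr package is installed):
getfattr -n trusted.glusterfs.pathinfo /data/distribute-volume/test1.log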

Creating a Replicated Volume

A replicated volume keeps a full copy of every file on each of its bricks, trading capacity for redundancy.

## Add another disk (/dev/sdc) to each node, using the same method as above

## Partition and format the new disk on each node; the fdisk session is identical to the /dev/sdb one shown earlier (n -> p -> 1 -> accept defaults -> w)
[root@docker01 ~]# fdisk /dev/sdc
[root@docker01 ~]# mkfs.ext4 /dev/sdc1

[root@docker02 ~]# fdisk /dev/sdc
[root@docker02 ~]# mkfs.ext4 /dev/sdc1

[root@backup ~]# fdisk /dev/sdc
[root@backup ~]# mkfs.ext4 /dev/sdc1

## Create the mount points and persist the mounts in /etc/fstab first; mount -a only mounts what fstab lists, so the fstab entry must come before it
[root@docker01 ~]# mkdir /sdc1
[root@docker01 ~]# echo "/dev/sdc1 /sdc1 ext4 defaults 0 0" >> /etc/fstab
[root@docker01 ~]# mount -a

[root@docker02 ~]# mkdir /sdc1
[root@docker02 ~]# echo "/dev/sdc1 /sdc1 ext4 defaults 0 0" >> /etc/fstab
[root@docker02 ~]# mount -a

[root@backup ~]# mkdir /sdc1
[root@backup ~]# echo "/dev/sdc1 /sdc1 ext4 defaults 0 0" >> /etc/fstab
[root@backup ~]# mount -a

## Create the replicated volume (replica 3 means every file is stored on all three bricks)
[root@docker01 ~]# gluster volume create replica-volume replica 3 gfs-node1:/sdc1 gfs-node2:/sdc1 gfs-node3:/sdc1 force
volume create: replica-volume: success: please start the volume to access data
[root@docker01 ~]#  gluster volume start replica-volume 
volume start: replica-volume: success
[root@docker01 ~]# gluster volume status 
Status of volume: distribute-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick gfs-node1:/sdb1                       49152     0          Y       8314 
Brick gfs-node2:/sdb1                       49152     0          Y       3162 
Brick gfs-node3:/sdb1                       49152     0          Y       2550 

Task Status of Volume distribute-volume
------------------------------------------------------------------------------
There are no active volume tasks

Status of volume: replica-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick gfs-node1:/sdc1                       49153     0          Y       13740
Brick gfs-node2:/sdc1                       49153     0          Y       4971 
Brick gfs-node3:/sdc1                       49153     0          Y       4163 
Self-heal Daemon on localhost               N/A       N/A        Y       13757
Self-heal Daemon on gfs-node3               N/A       N/A        Y       4180 
Self-heal Daemon on gfs-node2               N/A       N/A        Y       4988 

Task Status of Volume replica-volume
------------------------------------------------------------------------------
There are no active volume tasks
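
## With the self-heal daemons online, you can confirm that no files are waiting
## to be replicated:
gluster volume heal replica-volume info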

## Mount on the client and test
[root@harbor data]# mkdir -p /data/replica-volume
[root@harbor data]# echo "gfs-node1:replica-volume /data/replica-volume glusterfs defaults 0 0" >> /etc/fstab
[root@harbor data]# mount -a
[root@harbor data]# cd /data/replica-volume/
[root@harbor replica-volume]# ll
total 0
[root@harbor replica-volume]# dd if=/dev/zero of=test-replica1.log bs=1M count=10 
10+0 records in
10+0 records out
10485760 bytes (10 MB) copied, 0.149423 s, 70.2 MB/s
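
## One caveat on the fstab entry used above: glusterfs is a network filesystem,
## so at boot the mount can race with the network coming up. Adding the standard
## _netdev option defers the mount until networking is available (a suggested
## refinement, not in the original transcript):
gfs-node1:replica-volume /data/replica-volume glusterfs defaults,_netdev 0 0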

## Check each node; since this is a replicated volume, every brick holds the full contents of the mount
[root@docker01 ~]# ll /sdc1/
total 10240
-rw-r--r-- 2 root root 10485760 Oct  9 19:33 test-replica1.log
[root@docker02 ~]# ll /sdc1/
total 10240
-rw-r--r-- 2 root root 10485760 Oct  9 19:33 test-replica1.log
[root@backup ~]# ll /sdc1/
total 10240
-rw-r--r-- 2 root root 10485760 Oct  9 19:33 test-replica1.log
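
## As a final check, you can simulate a node failure and confirm the replica
## keeps serving data; a minimal sketch (the exact way to kill a brick is an
## assumption; adapt to your setup):
## on gfs-node3: stop the management daemon and kill the brick process
systemctl stop glusterd
pkill -f glusterfsd
## on harbor: the file is still readable from the two surviving replicas
cat /data/replica-volume/test-replica1.log > /dev/null && echo "still readable"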