Debian 12 bookworm

GlusterFS : Add Nodes (Bricks) (2023/06/19)

 
Add Nodes (Bricks) to an existing Cluster.
                                  |
+----------------------+          |          +----------------------+
| [GlusterFS Server#1] |10.0.0.51 | 10.0.0.52| [GlusterFS Server#2] |
|   node01.srv.world   +----------+----------+   node02.srv.world   |
|                      |          |          |                      |
+----------------------+          |          +----------------------+
           ⇑                      |                      ⇑
     file1, file3 ...             |               file2, file4 ...
                                  |
+----------------------+          |
| [GlusterFS Server#3] |10.0.0.53 |
|   node03.srv.world   +----------+
|                      |
+----------------------+

[1] Install GlusterFS on the new node (refer to here), and then create a directory for the GlusterFS volume at the same path as on the other nodes.
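For example, a minimal sketch of that step, assuming the brick path [/glusterfs/distributed] used by the existing volume:
# on the new node : create the brick directory
# (the path must match the bricks on the existing nodes)

root@node03:~#
mkdir -p /glusterfs/distributed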
[2] Add the new node to the existing cluster. (You can run the following on any existing node.)
# probe new node

root@node01:~#
gluster peer probe node03

peer probe: success.
# confirm status

root@node01:~#
gluster peer status

Number of Peers: 2

Hostname: node02
Uuid: b4a101f1-4c74-469a-a682-75a2d1354f06
State: Peer in Cluster (Connected)

Hostname: node03
Uuid: a8f82ac4-a623-4461-a679-f09cbda2fc0a
State: Peer in Cluster (Connected)

# confirm existing volume

root@node01:~#
gluster volume info


Volume Name: vol_distributed
Type: Distribute
Volume ID: f93730f9-cdf4-48a8-b0ed-d9fe1c053077
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node01:/glusterfs/distributed
Brick2: node02:/glusterfs/distributed
Options Reconfigured:
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on

# add new node

root@node01:~#
gluster volume add-brick vol_distributed node03:/glusterfs/distributed

volume add-brick: success
# confirm volume info

root@node01:~#
gluster volume info


Volume Name: vol_distributed
Type: Distribute
Volume ID: f93730f9-cdf4-48a8-b0ed-d9fe1c053077
Status: Started
Snapshot Count: 0
Number of Bricks: 3
Transport-type: tcp
Bricks:
Brick1: node01:/glusterfs/distributed
Brick2: node02:/glusterfs/distributed
Brick3: node03:/glusterfs/distributed
Options Reconfigured:
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on

# after adding the new node, rebalance the volume

root@node01:~#
gluster volume rebalance vol_distributed fix-layout start

volume rebalance: vol_distributed: success: Rebalance on vol_distributed has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: 7b981d2f-0ce0-4fb6-bc59-82a7e2d4da7c
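As the message above says, the progress of the rebalance can also be checked with the [rebalance status] command (the output varies by environment):
# check progress of the rebalance

root@node01:~#
gluster volume rebalance vol_distributed status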

# OK if [Status] turns to [completed]

root@node01:~#
gluster volume status

Status of volume: vol_distributed
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick node01:/glusterfs/distributed         60299     0          Y       11836
Brick node02:/glusterfs/distributed         51114     0          Y       5962
Brick node03:/glusterfs/distributed         51532     0          Y       5625

Task Status of Volume vol_distributed
------------------------------------------------------------------------------
Task                 : Rebalance
ID                   : 7b981d2f-0ce0-4fb6-bc59-82a7e2d4da7c
Status               : completed
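As an optional check (a sketch only; the client host and the mount point [/mnt/glusterfs] are just examples, and note that [fix-layout] does not move existing files), new files written to the volume after the rebalance can also be placed on the new brick:
# on a client with the GlusterFS client installed : mount the volume and write some test files

root@client:~#
mount -t glusterfs node01:/vol_distributed /mnt/glusterfs
root@client:~#
touch /mnt/glusterfs/testfile{1..10}

# on node03 : some of the new files should appear under the brick directory

root@node03:~#
ls /glusterfs/distributed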