about summary refs log tree commit diff
path: root/weed/shell/command_ec_common_test.go
diff options
context:
space:
mode:
author Lisandro Pin <lisandro.pin@proton.ch> 2024-12-04 18:00:55 +0100
committer GitHub <noreply@github.com> 2024-12-04 09:00:55 -0800
commit 351efa134d8a3158026f6560da723b7440a7c62f (patch)
tree 3d188ea26f34083bcf7a10df3e6744842ed4a062 /weed/shell/command_ec_common_test.go
parent b65eb2ec4569891495ae184bf6090e0e500961e3 (diff)
download seaweedfs-351efa134d8a3158026f6560da723b7440a7c62f.tar.xz
seaweedfs-351efa134d8a3158026f6560da723b7440a7c62f.zip
Account for replication placement settings when balancing EC shards across racks. (#6316)
Diffstat (limited to 'weed/shell/command_ec_common_test.go')
-rw-r--r-- weed/shell/command_ec_common_test.go 35
1 file changed, 20 insertions, 15 deletions
diff --git a/weed/shell/command_ec_common_test.go b/weed/shell/command_ec_common_test.go
index 76609c89d..29d7c2d4b 100644
--- a/weed/shell/command_ec_common_test.go
+++ b/weed/shell/command_ec_common_test.go
@@ -139,35 +139,40 @@ func TestVolumeIdToReplicaPlacement(t *testing.T) {
func TestPickRackToBalanceShardsInto(t *testing.T) {
testCases := []struct {
- topology *master_pb.TopologyInfo
- vid string
- wantOneOf []string
+ topology *master_pb.TopologyInfo
+ vid string
+ replicaPlacement string
+ wantOneOf []string
+ wantErr string
}{
// Non-EC volumes. We don't care about these, but the function should return all racks as a safeguard.
- {topologyEc, "", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}},
- {topologyEc, "6225", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}},
- {topologyEc, "6226", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}},
- {topologyEc, "6241", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}},
- {topologyEc, "6242", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}},
+ {topologyEc, "", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+ {topologyEc, "6225", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+ {topologyEc, "6226", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+ {topologyEc, "6241", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
+ {topologyEc, "6242", "123", []string{"rack1", "rack2", "rack3", "rack4", "rack5", "rack6"}, ""},
// EC volumes.
- {topologyEc, "9577", []string{"rack1", "rack2", "rack3"}},
- {topologyEc, "10457", []string{"rack1"}},
- {topologyEc, "12737", []string{"rack2"}},
- {topologyEc, "14322", []string{"rack3"}},
+ {topologyEc, "9577", "", nil, "shards 1 >= replica placement limit for other racks (0)"},
+ {topologyEc, "9577", "111", nil, "shards 1 >= replica placement limit for other racks (1)"},
+ {topologyEc, "9577", "222", []string{"rack1", "rack2", "rack3"}, ""},
+ {topologyEc, "10457", "222", []string{"rack1"}, ""},
+ {topologyEc, "12737", "222", []string{"rack2"}, ""},
+ {topologyEc, "14322", "222", []string{"rack3"}, ""},
}
for _, tc := range testCases {
vid, _ := needle.NewVolumeId(tc.vid)
ecNodes, _ := collectEcVolumeServersByDc(tc.topology, "")
racks := collectRacks(ecNodes)
+ rp, _ := super_block.NewReplicaPlacementFromString(tc.replicaPlacement)
locations := ecNodes
rackToShardCount := countShardsByRack(vid, locations)
averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks))
- got, gotErr := pickRackToBalanceShardsInto(racks, rackToShardCount, nil, averageShardsPerEcRack)
- if gotErr != nil {
- t.Errorf("volume %q: %s", tc.vid, gotErr.Error())
+ got, gotErr := pickRackToBalanceShardsInto(racks, rackToShardCount, rp, averageShardsPerEcRack)
+ if err := errorCheck(gotErr, tc.wantErr); err != nil {
+ t.Errorf("volume %q: %s", tc.vid, err.Error())
continue
}