aboutsummaryrefslogtreecommitdiff
path: root/weed/shell/command_ec_rebuild_test.go
diff options
context:
space:
mode:
authorLisandro Pin <lisandro.pin@proton.ch>2025-11-11 07:43:43 +0100
committerGitHub <noreply@github.com>2025-11-10 22:43:43 -0800
commit79fa87bad4d6455631356a5095f5b0e9fc06eef6 (patch)
tree4d37c29fc55a5786c7b025d1c9067675e9208817 /weed/shell/command_ec_rebuild_test.go
parentbf8e4f40e60e74ce03c2f497c6245e5d1460f1d3 (diff)
downloadseaweedfs-79fa87bad4d6455631356a5095f5b0e9fc06eef6.tar.xz
seaweedfs-79fa87bad4d6455631356a5095f5b0e9fc06eef6.zip
Rework parameters passing for functions within `ec.rebuild` (#7445)
* Rework parameters passing for functions within `ec.rebuild` This simplifies the overall codebase and allows to cleanly handle parallelization via waitgroups. * fix copy source * add tests * remove tests not useful * fmt * nil check --------- Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com> Co-authored-by: chrislu <chris.lu@gmail.com>
Diffstat (limited to 'weed/shell/command_ec_rebuild_test.go')
-rw-r--r--weed/shell/command_ec_rebuild_test.go309
1 file changed, 309 insertions, 0 deletions
diff --git a/weed/shell/command_ec_rebuild_test.go b/weed/shell/command_ec_rebuild_test.go
new file mode 100644
index 000000000..5ab431137
--- /dev/null
+++ b/weed/shell/command_ec_rebuild_test.go
@@ -0,0 +1,309 @@
+package shell
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+ "github.com/seaweedfs/seaweedfs/weed/storage/needle"
+)
+
+// TestEcShardMapRegister verifies that registering two nodes with disjoint
+// shard sets yields a complete, correctly distributed volume entry.
+func TestEcShardMapRegister(t *testing.T) {
+ shardMap := make(EcShardMap)
+
+ // node1 carries shards 0-6 of volume 1; node2 carries shards 7-13.
+ node1 := newEcNode("dc1", "rack1", "node1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6})
+ node2 := newEcNode("dc1", "rack1", "node2", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13})
+
+ shardMap.registerEcNode(node1, "c1")
+ shardMap.registerEcNode(node2, "c1")
+
+ // The volume must be present after registration.
+ locs, ok := shardMap[needle.VolumeId(1)]
+ if !ok {
+ t.Fatal("Expected volume 1 to be registered")
+ }
+
+ // Together the two nodes contribute the full shard set.
+ if got := locs.shardCount(); got != erasure_coding.TotalShardsCount {
+ t.Errorf("Expected %d shards, got %d", erasure_coding.TotalShardsCount, got)
+ }
+
+ // ownedBy reports whether a shard lives on exactly one node with the given id.
+ ownedBy := func(shard int, owner string) bool {
+ return len(locs[shard]) == 1 && locs[shard][0].info.Id == owner
+ }
+ for shard := 0; shard < 7; shard++ {
+ if !ownedBy(shard, "node1") {
+ t.Errorf("Shard %d should be on node1", shard)
+ }
+ }
+ for shard := 7; shard < erasure_coding.TotalShardsCount; shard++ {
+ if !ownedBy(shard, "node2") {
+ t.Errorf("Shard %d should be on node2", shard)
+ }
+ }
+}
+
+// TestEcShardMapShardCount exercises shardCount() across several shard layouts,
+// from a full shard set down to none at all.
+func TestEcShardMapShardCount(t *testing.T) {
+ cases := []struct {
+ name string
+ shardIds []uint32
+ expectedCount int
+ }{
+ {"all shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 14},
+ {"data shards only", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 10},
+ {"parity shards only", []uint32{10, 11, 12, 13}, 4},
+ {"missing some shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8}, 9},
+ {"single shard", []uint32{0}, 1},
+ {"no shards", []uint32{}, 0},
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ // Populate only the listed shard slots; each holds a single node.
+ locs := make(EcShardLocations, erasure_coding.MaxShardCount)
+ for _, id := range c.shardIds {
+ locs[id] = []*EcNode{newEcNode("dc1", "rack1", "node1", 100)}
+ }
+
+ if got := locs.shardCount(); got != c.expectedCount {
+ t.Errorf("Expected %d shards, got %d", c.expectedCount, got)
+ }
+ })
+ }
+}
+
+// TestEcRebuilderEcNodeWithMoreFreeSlots checks that the rebuilder selects the
+// node with the most free slots, preferring the first one on ties.
+func TestEcRebuilderEcNodeWithMoreFreeSlots(t *testing.T) {
+ cases := []struct {
+ name string
+ nodes []*EcNode
+ expectedNode string
+ }{
+ {
+ name: "single node",
+ nodes: []*EcNode{
+ newEcNode("dc1", "rack1", "node1", 100),
+ },
+ expectedNode: "node1",
+ },
+ {
+ name: "multiple nodes - select highest",
+ nodes: []*EcNode{
+ newEcNode("dc1", "rack1", "node1", 50),
+ newEcNode("dc1", "rack1", "node2", 150),
+ newEcNode("dc1", "rack1", "node3", 100),
+ },
+ expectedNode: "node2",
+ },
+ {
+ name: "multiple nodes - same slots",
+ nodes: []*EcNode{
+ newEcNode("dc1", "rack1", "node1", 100),
+ newEcNode("dc1", "rack1", "node2", 100),
+ },
+ expectedNode: "node1", // Should return first one
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ erb := &ecRebuilder{ecNodes: c.nodes}
+
+ got := erb.ecNodeWithMoreFreeSlots()
+ if got == nil {
+ t.Fatal("Expected a node, got nil")
+ }
+ if got.info.Id != c.expectedNode {
+ t.Errorf("Expected node %s, got %s", c.expectedNode, got.info.Id)
+ }
+ })
+ }
+}
+
+// TestEcRebuilderEcNodeWithMoreFreeSlotsEmpty ensures that an empty node list
+// yields nil rather than panicking.
+func TestEcRebuilderEcNodeWithMoreFreeSlotsEmpty(t *testing.T) {
+ erb := &ecRebuilder{ecNodes: []*EcNode{}}
+
+ if got := erb.ecNodeWithMoreFreeSlots(); got != nil {
+ t.Errorf("Expected nil for empty node list, got %v", got)
+ }
+}
+
+// TestRebuildEcVolumesInsufficientShards verifies that a volume with fewer
+// shards than the data-shard minimum is reported as unrepairable.
+func TestRebuildEcVolumesInsufficientShards(t *testing.T) {
+ var out bytes.Buffer
+
+ // Only 5 shards exist for volume 1 — too few to reconstruct the data.
+ node1 := newEcNode("dc1", "rack1", "node1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4})
+
+ erb := &ecRebuilder{
+ commandEnv: &CommandEnv{
+ env: make(map[string]string),
+ noLock: true, // Bypass lock check for unit test
+ },
+ ecNodes: []*EcNode{node1},
+ writer: &out,
+ }
+
+ err := erb.rebuildEcVolumes("c1")
+ if err == nil {
+ t.Fatal("Expected error for insufficient shards, got nil")
+ }
+ if !strings.Contains(err.Error(), "unrepairable") {
+ t.Errorf("Expected 'unrepairable' in error message, got: %s", err.Error())
+ }
+}
+
+// TestRebuildEcVolumesCompleteVolume verifies that a volume already holding
+// every shard is skipped without error.
+func TestRebuildEcVolumesCompleteVolume(t *testing.T) {
+ var out bytes.Buffer
+
+ // All 14 shards are present, so there is nothing to rebuild.
+ node1 := newEcNode("dc1", "rack1", "node1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
+
+ erb := &ecRebuilder{
+ commandEnv: &CommandEnv{
+ env: make(map[string]string),
+ noLock: true, // Bypass lock check for unit test
+ },
+ ecNodes: []*EcNode{node1},
+ writer: &out,
+ applyChanges: false,
+ }
+
+ // A complete volume must be a fast no-op; any error is a failure.
+ if err := erb.rebuildEcVolumes("c1"); err != nil {
+ t.Fatalf("Expected no error for complete volume, got: %v", err)
+ }
+}
+
+// TestRebuildEcVolumesInsufficientSpace verifies that rebuild fails when the
+// chosen node lacks the free slots needed to host the rebuilt shards.
+func TestRebuildEcVolumesInsufficientSpace(t *testing.T) {
+ var out bytes.Buffer
+
+ // Shards are missing but the only node offers just 5 free slots.
+ node1 := newEcNode("dc1", "rack1", "node1", 5).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+
+ erb := &ecRebuilder{
+ commandEnv: &CommandEnv{
+ env: make(map[string]string),
+ noLock: true, // Bypass lock check for unit test
+ },
+ ecNodes: []*EcNode{node1},
+ writer: &out,
+ applyChanges: false,
+ }
+
+ err := erb.rebuildEcVolumes("c1")
+ if err == nil {
+ t.Fatal("Expected error for insufficient disk space, got nil")
+ }
+ if !strings.Contains(err.Error(), "disk space is not enough") {
+ t.Errorf("Expected 'disk space' in error message, got: %s", err.Error())
+ }
+}
+
+// TestMultipleNodesWithShards registers a volume whose 10 shards are spread
+// over three nodes and checks both the total count and per-shard placement.
+func TestMultipleNodesWithShards(t *testing.T) {
+ shardMap := make(EcShardMap)
+
+ // node1: shards 0-3, node2: shards 4-7, node3: shards 8-9.
+ node1 := newEcNode("dc1", "rack1", "node1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
+ node2 := newEcNode("dc1", "rack1", "node2", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{4, 5, 6, 7})
+ node3 := newEcNode("dc1", "rack1", "node3", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{8, 9})
+
+ shardMap.registerEcNode(node1, "c1")
+ shardMap.registerEcNode(node2, "c1")
+ shardMap.registerEcNode(node3, "c1")
+
+ locs := shardMap[needle.VolumeId(1)]
+
+ // 10 distinct shards were registered in total — enough for the data shards.
+ if got := locs.shardCount(); got != 10 {
+ t.Errorf("Expected 10 shards, got %d", got)
+ }
+
+ // ownedBy reports whether a shard lives on exactly one node with the given id.
+ ownedBy := func(shard int, owner string) bool {
+ return len(locs[shard]) == 1 && locs[shard][0].info.Id == owner
+ }
+ for shard := 0; shard < 4; shard++ {
+ if !ownedBy(shard, "node1") {
+ t.Errorf("Shard %d should be on node1", shard)
+ }
+ }
+ for shard := 4; shard < 8; shard++ {
+ if !ownedBy(shard, "node2") {
+ t.Errorf("Shard %d should be on node2", shard)
+ }
+ }
+ for shard := 8; shard < 10; shard++ {
+ if !ownedBy(shard, "node3") {
+ t.Errorf("Shard %d should be on node3", shard)
+ }
+ }
+}
+
+// TestDuplicateShards verifies that a shard held by two nodes is recorded on
+// both of them, while shardCount() still counts it only once.
+func TestDuplicateShards(t *testing.T) {
+ shardMap := make(EcShardMap)
+
+ // Both nodes advertise shard 0 of volume 1.
+ node1 := newEcNode("dc1", "rack1", "node1", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
+ node2 := newEcNode("dc1", "rack1", "node2", 100).
+ addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 4, 5, 6}) // Duplicate shard 0
+
+ shardMap.registerEcNode(node1, "c1")
+ shardMap.registerEcNode(node2, "c1")
+
+ locs := shardMap[needle.VolumeId(1)]
+
+ // The duplicated shard keeps one location entry per node.
+ if len(locs[0]) != 2 {
+ t.Errorf("Expected shard 0 on 2 nodes, got %d", len(locs[0]))
+ }
+
+ // Both node ids must appear among shard 0's locations.
+ seen := make(map[string]bool)
+ for _, n := range locs[0] {
+ seen[n.info.Id] = true
+ }
+ if !seen["node1"] || !seen["node2"] {
+ t.Error("Both nodes should have shard 0")
+ }
+
+ // Unique shards are 0-6, so the count stays 7 despite the duplicate.
+ if got := locs.shardCount(); got != 7 {
+ t.Errorf("Expected 7 unique shards, got %d", got)
+ }
+}