| author | Chris Lu <chrislusf@users.noreply.github.com> | 2025-12-14 17:06:13 -0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-12-14 17:06:13 -0800 |
| commit | 7ed75784241ad8d7635113e3e173959f0e8446ae (patch) | |
| tree | 11c7eeb84c6aaed2ba328b0a46ed760a93fc277c /weed/storage/erasure_coding/ec_decoder_test.go | |
| parent | 8bdc4390a04604af79f91c7dce94e3b2b58442f7 (diff) | |
| download | seaweedfs-7ed75784241ad8d7635113e3e173959f0e8446ae.tar.xz seaweedfs-7ed75784241ad8d7635113e3e173959f0e8446ae.zip | |
fix(ec.decode): purge EC shards when volume is empty (#7749)
* fix(ec.decode): purge EC shards when volume is empty
When an EC volume has no live entries (all deleted), ec.decode should not generate an empty normal volume. Instead, treat decode as a no-op and allow shard purge to proceed cleanly.

Fixes: #7748
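A minimal sketch of that control flow, assuming only the `HasLiveNeedles` helper exercised by the new test below; `decodeOrSkip` and its `decode` callback are illustrative stand-ins, not the actual volume server code.

```go
package sketch

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)

// decodeOrSkip illustrates the intended shape of the fix: when the EC index
// has no live entries, decoding is skipped instead of producing an empty
// normal volume, and the caller can go straight to purging the shards.
func decodeOrSkip(baseFileName string, decode func(baseFileName string) error) (skipped bool, err error) {
	hasLive, err := erasure_coding.HasLiveNeedles(baseFileName)
	if err != nil {
		return false, fmt.Errorf("check live needles for %s: %w", baseFileName, err)
	}
	if !hasLive {
		// All index entries are deleted (or the index is empty): treat the
		// decode as a no-op so the shard purge can proceed cleanly.
		return true, nil
	}
	return false, decode(baseFileName)
}
```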
* chore: address PR review comments
* test: cover live EC index + avoid magic string
* chore: harden empty-EC handling
- Make shard cleanup best-effort (collect errors; see the sketch below)
- Remove unreachable EOF handling in HasLiveNeedles
- Add empty ecx test case
- Share no-live-entries substring between server/client
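A short sketch of the best-effort cleanup item above. It assumes shard files use the usual `.ecNN` extensions; the helper name and shard count parameter are illustrative. The point is the error-collection shape: a failure on one shard does not abort cleanup of the rest.

```go
package sketch

import (
	"errors"
	"fmt"
	"os"
)

// removeShardFilesBestEffort deletes the local EC shard files for one volume.
// Each failure is recorded instead of stopping the loop, and the collected
// failures are returned as a single error via errors.Join (Go 1.20+).
func removeShardFilesBestEffort(baseFileName string, shardCount int) error {
	var errs []error
	for i := 0; i < shardCount; i++ {
		name := fmt.Sprintf("%s.ec%02d", baseFileName, i) // e.g. foo_1.ec00 ... foo_1.ec13
		if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
			errs = append(errs, fmt.Errorf("remove %s: %w", name, err))
		}
	}
	return errors.Join(errs...)
}
```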
* perf: parallelize EC shard unmount/delete across locations
* refactor: combine unmount+delete into single goroutine per location
* refactor: use errors.Join for multi-error aggregation
* refactor: use existing ErrorWaitGroup for parallel execution
* fix: capture loop variables + clarify SuperBlockSize safety
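The four items above all shape the same routine: one goroutine per location performs unmount followed by delete, per-location errors are aggregated with errors.Join, and the loop variable is captured explicitly. A minimal standard-library sketch of that shape follows; the real change uses SeaweedFS's existing ErrorWaitGroup helper rather than a bare sync.WaitGroup, and `unmountAndDelete` is a hypothetical callback standing in for the per-location RPCs.

```go
package sketch

import (
	"errors"
	"sync"
)

// purgeShardsAcrossLocations runs unmount+delete once per location, in
// parallel, and aggregates all failures into a single error.
func purgeShardsAcrossLocations(locations []string, unmountAndDelete func(location string) error) error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, loc := range locations {
		loc := loc // capture the loop variable; required before Go 1.22
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := unmountAndDelete(loc); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	return errors.Join(errs...)
}
```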
Diffstat (limited to 'weed/storage/erasure_coding/ec_decoder_test.go')
| -rw-r--r-- | weed/storage/erasure_coding/ec_decoder_test.go | 81 |
1 file changed, 81 insertions, 0 deletions
```diff
diff --git a/weed/storage/erasure_coding/ec_decoder_test.go b/weed/storage/erasure_coding/ec_decoder_test.go
new file mode 100644
index 000000000..625d55402
--- /dev/null
+++ b/weed/storage/erasure_coding/ec_decoder_test.go
@@ -0,0 +1,81 @@
+package erasure_coding_test
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	erasure_coding "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+	"github.com/seaweedfs/seaweedfs/weed/storage/types"
+)
+
+func TestHasLiveNeedles_AllDeletedIsFalse(t *testing.T) {
+	dir := t.TempDir()
+
+	collection := "foo"
+	base := filepath.Join(dir, collection+"_1")
+
+	// Build an ecx file with only deleted entries.
+	// ecx file entries are the same format as .idx entries.
+	ecx := makeNeedleMapEntry(types.NeedleId(1), types.Offset{}, types.TombstoneFileSize)
+	if err := os.WriteFile(base+".ecx", ecx, 0644); err != nil {
+		t.Fatalf("write ecx: %v", err)
+	}
+
+	hasLive, err := erasure_coding.HasLiveNeedles(base)
+	if err != nil {
+		t.Fatalf("HasLiveNeedles: %v", err)
+	}
+	if hasLive {
+		t.Fatalf("expected no live entries")
+	}
+}
+
+func TestHasLiveNeedles_WithLiveEntryIsTrue(t *testing.T) {
+	dir := t.TempDir()
+
+	collection := "foo"
+	base := filepath.Join(dir, collection+"_1")
+
+	// Build an ecx file containing at least one live entry.
+	// ecx file entries are the same format as .idx entries.
+	live := makeNeedleMapEntry(types.NeedleId(1), types.Offset{}, types.Size(1))
+	if err := os.WriteFile(base+".ecx", live, 0644); err != nil {
+		t.Fatalf("write ecx: %v", err)
+	}
+
+	hasLive, err := erasure_coding.HasLiveNeedles(base)
+	if err != nil {
+		t.Fatalf("HasLiveNeedles: %v", err)
+	}
+	if !hasLive {
+		t.Fatalf("expected live entries")
+	}
+}
+
+func TestHasLiveNeedles_EmptyFileIsFalse(t *testing.T) {
+	dir := t.TempDir()
+
+	base := filepath.Join(dir, "foo_1")
+
+	// Create an empty ecx file.
+	if err := os.WriteFile(base+".ecx", []byte{}, 0644); err != nil {
+		t.Fatalf("write ecx: %v", err)
+	}
+
+	hasLive, err := erasure_coding.HasLiveNeedles(base)
+	if err != nil {
+		t.Fatalf("HasLiveNeedles: %v", err)
+	}
+	if hasLive {
+		t.Fatalf("expected no live entries for empty file")
+	}
+}
+
+func makeNeedleMapEntry(key types.NeedleId, offset types.Offset, size types.Size) []byte {
+	b := make([]byte, types.NeedleIdSize+types.OffsetSize+types.SizeSize)
+	types.NeedleIdToBytes(b[0:types.NeedleIdSize], key)
+	types.OffsetToBytes(b[types.NeedleIdSize:types.NeedleIdSize+types.OffsetSize], offset)
+	types.SizeToBytes(b[types.NeedleIdSize+types.OffsetSize:types.NeedleIdSize+types.OffsetSize+types.SizeSize], size)
+	return b
+}
```
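The tests above exercise HasLiveNeedles, whose implementation is not part of this file. The sketch below is a hypothetical reading of what those tests require, assuming only the entry layout stated in the test comments (needle id, offset, size, same as `.idx`) and treating any entry whose size is not the tombstone marker as live; the PR's actual implementation may differ.

```go
package sketch

import (
	"bytes"
	"io"
	"os"

	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

// hasLiveNeedlesSketch scans baseFileName+".ecx" and reports whether any
// index entry is not a tombstone. An empty index file yields (false, nil).
func hasLiveNeedlesSketch(baseFileName string) (bool, error) {
	f, err := os.Open(baseFileName + ".ecx")
	if err != nil {
		return false, err
	}
	defer f.Close()

	// An .ecx entry has the same layout as an .idx entry: needle id, offset, size.
	entrySize := types.NeedleIdSize + types.OffsetSize + types.SizeSize
	tombstone := make([]byte, types.SizeSize)
	types.SizeToBytes(tombstone, types.TombstoneFileSize)

	buf := make([]byte, entrySize)
	for {
		if _, err := io.ReadFull(f, buf); err != nil {
			if err == io.EOF {
				return false, nil // reached the end without finding a live entry
			}
			return false, err
		}
		if !bytes.Equal(buf[types.NeedleIdSize+types.OffsetSize:], tombstone) {
			return true, nil // size is not the tombstone marker: live entry
		}
	}
}
```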
