diff options
| author | Chris Lu <chris.lu@gmail.com> | 2018-05-24 01:22:37 -0700 |
|---|---|---|
| committer | Chris Lu <chris.lu@gmail.com> | 2018-05-24 01:22:37 -0700 |
| commit | d773e11c7a65903b3ee1adea801a20f91cb0c7aa (patch) | |
| tree | 9e40f834e929d826c9ce5dacd9fa57ca0de57bc6 /weed/filer2 | |
| parent | 00d0274fd7c829f5d26c051f5832e0f602929b08 (diff) | |
| download | seaweedfs-d773e11c7a65903b3ee1adea801a20f91cb0c7aa.tar.xz seaweedfs-d773e11c7a65903b3ee1adea801a20f91cb0c7aa.zip | |
file handler directly read from volume servers
this mostly works fine now!
next: need to cache files to local disk
Diffstat (limited to 'weed/filer2')
| -rw-r--r-- | weed/filer2/filechunks.go | 18 | ||||
| -rw-r--r-- | weed/filer2/filechunks_test.go | 44 |
2 files changed, 37 insertions, 25 deletions
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index 93cee81de..6bdfbd48e 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -52,7 +52,14 @@ func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused [] return } -func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*filer_pb.FileChunk) { +type ChunkView struct { + FileId string + Offset int64 + Size uint64 + LogicOffset int64 +} + +func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { visibles := nonOverlappingVisibleIntervals(chunks) @@ -60,10 +67,11 @@ func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views for _, chunk := range visibles { if chunk.start <= offset && offset < chunk.stop { - views = append(views, &filer_pb.FileChunk{ - FileId: chunk.fileId, - Offset: offset - chunk.start, // offset is the data starting location in this file id - Size: uint64(min(chunk.stop, stop) - offset), + views = append(views, &ChunkView{ + FileId: chunk.fileId, + Offset: offset - chunk.start, // offset is the data starting location in this file id + Size: uint64(min(chunk.stop, stop) - offset), + LogicOffset: offset, }) offset = min(chunk.stop, stop) } diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index 9e39477be..24897215e 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -151,7 +151,7 @@ func TestChunksReading(t *testing.T) { Chunks []*filer_pb.FileChunk Offset int64 Size int - Expected []*filer_pb.FileChunk + Expected []*ChunkView }{ // case 0: normal { @@ -162,10 +162,10 @@ func TestChunksReading(t *testing.T) { }, Offset: 0, Size: 250, - Expected: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc"}, - {Offset: 0, Size: 100, FileId: "asdf"}, - {Offset: 0, Size: 50, FileId: "fsad"}, + Expected: []*ChunkView{ + {Offset: 0, Size: 100, FileId: "abc", LogicOffset:0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset:100},
+ {Offset: 0, Size: 50, FileId: "fsad", LogicOffset:200}, }, }, // case 1: updates overwrite full chunks @@ -176,8 +176,8 @@ func TestChunksReading(t *testing.T) { }, Offset: 50, Size: 100, - Expected: []*filer_pb.FileChunk{ - {Offset: 50, Size: 100, FileId: "asdf"}, + Expected: []*ChunkView{ + {Offset: 50, Size: 100, FileId: "asdf", LogicOffset:50}, }, }, // case 2: updates overwrite part of previous chunks @@ -188,9 +188,9 @@ func TestChunksReading(t *testing.T) { }, Offset: 25, Size: 50, - Expected: []*filer_pb.FileChunk{ - {Offset: 25, Size: 25, FileId: "asdf"}, - {Offset: 0, Size: 25, FileId: "abc"}, + Expected: []*ChunkView{ + {Offset: 25, Size: 25, FileId: "asdf", LogicOffset:25}, + {Offset: 0, Size: 25, FileId: "abc", LogicOffset:50}, }, }, // case 3: updates overwrite full chunks @@ -202,9 +202,9 @@ func TestChunksReading(t *testing.T) { }, Offset: 0, Size: 200, - Expected: []*filer_pb.FileChunk{ - {Offset: 0, Size: 50, FileId: "asdf"}, - {Offset: 0, Size: 150, FileId: "xxxx"}, + Expected: []*ChunkView{ + {Offset: 0, Size: 50, FileId: "asdf", LogicOffset:0}, + {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset:50}, }, }, // case 4: updates far away from prev chunks @@ -216,8 +216,8 @@ func TestChunksReading(t *testing.T) { }, Offset: 0, Size: 400, - Expected: []*filer_pb.FileChunk{ - {Offset: 0, Size: 200, FileId: "asdf"}, + Expected: []*ChunkView{ + {Offset: 0, Size: 200, FileId: "asdf", LogicOffset:0}, // {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen }, }, @@ -231,9 +231,9 @@ func TestChunksReading(t *testing.T) { }, Offset: 0, Size: 220, - Expected: []*filer_pb.FileChunk{ - {Offset: 0, Size: 200, FileId: "asdf"}, - {Offset: 0, Size: 20, FileId: "abc"}, + Expected: []*ChunkView{ + {Offset: 0, Size: 200, FileId: "asdf", LogicOffset:0}, + {Offset: 0, Size: 20, FileId: "abc", LogicOffset:200}, }, }, // case 6: same updates @@ -245,8 +245,8 @@ func TestChunksReading(t *testing.T) { },
Offset: 0, Size: 100, - Expected: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc"}, + Expected: []*ChunkView{ + {Offset: 0, Size: 100, FileId: "abc", LogicOffset:0}, }, }, } @@ -269,6 +269,10 @@ func TestChunksReading(t *testing.T) { t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s", i, x, chunk.FileId, testcase.Expected[x].FileId) } + if chunk.LogicOffset != testcase.Expected[x].LogicOffset { + t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d", + i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset) + } } if len(chunks) != len(testcase.Expected) { t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected)) |
