aboutsummaryrefslogtreecommitdiff
path: root/weed/filer/filechunk_section.go
diff options
context:
space:
mode:
authorChris Lu <chrislusf@users.noreply.github.com>2023-01-02 23:20:45 -0800
committerGitHub <noreply@github.com>2023-01-02 23:20:45 -0800
commitd4566d4aaa426b33015780c7cc18f887fc07cca4 (patch)
tree7c3b5cb3d9e54297b9d4213b67408f86149013f7 /weed/filer/filechunk_section.go
parent367353b936c450906e88e850c7d1e804f97c3560 (diff)
downloadseaweedfs-d4566d4aaa426b33015780c7cc18f887fc07cca4.tar.xz
seaweedfs-d4566d4aaa426b33015780c7cc18f887fc07cca4.zip
more solid weed mount (#4089)
* compare chunks by timestamp * fix slab clearing error * fix test compilation * move oldest chunk to sealed, instead of by fullness * lock on fh.entryViewCache * remove verbose logs * revert slat clearing * less logs * less logs * track write and read by timestamp * remove useless logic * add entry lock on file handle release * use mem chunk only, swap file chunk has problems * comment out code that maybe used later * add debug mode to compare data read and write * more efficient readResolvedChunks with linked list * small optimization * fix test compilation * minor fix on writer * add SeparateGarbageChunks * group chunks into sections * turn off debug mode * fix tests * fix tests * tmp enable swap file chunk * Revert "tmp enable swap file chunk" This reverts commit 985137ec472924e4815f258189f6ca9f2168a0a7. * simple refactoring * simple refactoring * do not re-use swap file chunk. Sealed chunks should not be re-used. * comment out debugging facilities * either mem chunk or swap file chunk is fine now * remove orderedMutex as *semaphore.Weighted not found impactful * optimize size calculation for changing large files * optimize performance to avoid going through the long list of chunks * still problems with swap file chunk * rename * tiny optimization * swap file chunk save only successfully read data * fix * enable both mem and swap file chunk * resolve chunks with range * rename * fix chunk interval list * also change file handle chunk group when adding chunks * pick in-active chunk with time-decayed counter * fix compilation * avoid nil with empty fh.entry * refactoring * rename * rename * refactor visible intervals to *list.List * refactor chunkViews to *list.List * add IntervalList for generic interval list * change visible interval to use IntervalList in generics * cahnge chunkViews to *IntervalList[*ChunkView] * use NewFileChunkSection to create * rename variables * refactor * fix renaming leftover * renaming * renaming * add insert interval * interval list 
adds lock * incrementally add chunks to readers Fixes: 1. set start and stop offset for the value object 2. clone the value object 3. use pointer instead of copy-by-value when passing to interval.Value 4. use insert interval since adding chunk could be out of order * fix tests compilation * fix tests compilation
Diffstat (limited to 'weed/filer/filechunk_section.go')
-rw-r--r--weed/filer/filechunk_section.go119
1 files changed, 119 insertions, 0 deletions
diff --git a/weed/filer/filechunk_section.go b/weed/filer/filechunk_section.go
new file mode 100644
index 000000000..60c919569
--- /dev/null
+++ b/weed/filer/filechunk_section.go
@@ -0,0 +1,119 @@
+package filer
+
+import (
+ "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "sync"
+)
+
// SectionSize is the span of file offsets covered by one FileChunkSection:
// 2 * 1024 * 1024 * 128 bytes = 256 MiB.
const SectionSize = 2 * 1024 * 1024 * 128 // 256MiB

// SectionIndex identifies a section within a file: file offset / SectionSize.
type SectionIndex int64

// FileChunkSection tracks the chunks that overlap one SectionSize-aligned
// slice of a file, plus the read-side structures derived from them.
// visibleIntervals, chunkViews and reader are built lazily on first read
// (see setupForRead) and stay nil until then.
type FileChunkSection struct {
	sectionIndex     SectionIndex                    // which SectionSize-aligned slice of the file this covers
	chunks           []*filer_pb.FileChunk           // raw chunks overlapping this section; garbage is pruned lazily
	visibleIntervals *IntervalList[*VisibleInterval] // resolved visibility after overwrites; nil until first read
	chunkViews       *IntervalList[*ChunkView]       // read descriptors derived from visibleIntervals; nil until first read
	reader           *ChunkReadAt                    // cached reader; discarded whenever visibleIntervals is rebuilt
	lock             sync.Mutex                      // guards all fields above
}
+
+func NewFileChunkSection(si SectionIndex) *FileChunkSection {
+ return &FileChunkSection{
+ sectionIndex: si,
+ }
+}
+
+func (section *FileChunkSection) addChunk(chunk *filer_pb.FileChunk) error {
+ section.lock.Lock()
+ defer section.lock.Unlock()
+
+ start, stop := max(int64(section.sectionIndex)*SectionSize, chunk.Offset), min(((int64(section.sectionIndex)+1)*SectionSize), chunk.Offset+int64(chunk.Size))
+
+ section.chunks = append(section.chunks, chunk)
+
+ if section.visibleIntervals != nil {
+ MergeIntoVisibles(section.visibleIntervals, start, stop, chunk)
+ }
+
+ if section.visibleIntervals != nil {
+ section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks)
+ }
+
+ if section.chunkViews != nil {
+ MergeIntoChunkViews(section.chunkViews, start, stop, chunk)
+ }
+
+ return nil
+}
+
// setupForRead lazily builds the structures needed to serve reads from this
// section: visibleIntervals (which chunk bytes remain visible after
// overwrites), chunkViews (per-interval read descriptors), and a cached
// reader. Safe to call repeatedly; each structure is built at most once
// until invalidated. Caller must hold section.lock.
func (section *FileChunkSection) setupForRead(group *ChunkGroup, fileSize int64) {
	if section.visibleIntervals == nil {
		// Resolve the accumulated chunks into non-overlapping visible
		// intervals, restricted to this section's byte range.
		section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
		// Prune chunks no longer visible anywhere in this section.
		section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks)
		if section.reader != nil {
			// Any cached reader was built from stale intervals; discard it.
			_ = section.reader.Close()
			section.reader = nil
		}
	}
	if section.chunkViews == nil {
		section.chunkViews = ViewFromVisibleIntervals(section.visibleIntervals, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
	}

	if section.reader == nil {
		// The reader is capped at the section end or the file end, whichever
		// comes first.
		section.reader = NewChunkReaderAtFromClient(group.lookupFn, section.chunkViews, group.chunkCache, min(int64(section.sectionIndex+1)*SectionSize, fileSize))
	}
	// Keep the cached reader's notion of file size current even when reusing it.
	section.reader.fileSize = fileSize
}
+
+func (section *FileChunkSection) readDataAt(group *ChunkGroup, fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) {
+ section.lock.Lock()
+ defer section.lock.Unlock()
+
+ section.setupForRead(group, fileSize)
+
+ return section.reader.ReadAtWithTime(buff, offset)
+}
+
+func (section *FileChunkSection) DataStartOffset(group *ChunkGroup, offset int64, fileSize int64) int64 {
+ section.lock.Lock()
+ defer section.lock.Unlock()
+
+ section.setupForRead(group, fileSize)
+
+ for x := section.visibleIntervals.Front(); x != nil; x = x.Next {
+ visible := x.Value
+ if visible.stop <= offset {
+ continue
+ }
+ if offset < visible.start {
+ return offset
+ }
+ return offset
+ }
+ return -1
+}
+
// NextStopOffset extends offset to the end of the contiguous run of visible
// data that covers it: it walks the visible intervals in order, advancing
// offset to each interval's stop while the intervals touch, and stops at the
// first gap. If no visible interval covers or follows offset, offset is
// returned unchanged.
func (section *FileChunkSection) NextStopOffset(group *ChunkGroup, offset int64, fileSize int64) int64 {
	section.lock.Lock()
	defer section.lock.Unlock()

	section.setupForRead(group, fileSize)

	// isAfterOffset latches once we reach the first interval ending past the
	// starting offset; earlier intervals are skipped without affecting offset.
	isAfterOffset := false
	for x := section.visibleIntervals.Front(); x != nil; x = x.Next {
		visible := x.Value
		if !isAfterOffset {
			// Still before the region of interest: skip intervals that end
			// at or before the starting offset.
			if visible.stop <= offset {
				continue
			}
			isAfterOffset = true
		}
		if offset < visible.start {
			// A hole precedes this interval: the contiguous data run ends here.
			return offset
		}
		// now visible.start <= offset
		if offset < visible.stop {
			// Interval covers offset: extend the run to its end.
			offset = visible.stop
		}
	}
	return offset
}