| author | Chris Lu <chrislusf@users.noreply.github.com> | 2023-01-02 23:20:45 -0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-01-02 23:20:45 -0800 |
| commit | d4566d4aaa426b33015780c7cc18f887fc07cca4 (patch) | |
| tree | 7c3b5cb3d9e54297b9d4213b67408f86149013f7 /weed/mount/filehandle.go | |
| parent | 367353b936c450906e88e850c7d1e804f97c3560 (diff) | |
| download | seaweedfs-d4566d4aaa426b33015780c7cc18f887fc07cca4.tar.xz seaweedfs-d4566d4aaa426b33015780c7cc18f887fc07cca4.zip | |
more solid weed mount (#4089)
* compare chunks by timestamp
* fix slab clearing error
* fix test compilation
* move oldest chunk to sealed, instead of by fullness
* lock on fh.entryViewCache
* remove verbose logs
* revert slab clearing
* less logs
* less logs
* track write and read by timestamp
* remove useless logic
* add entry lock on file handle release
* use mem chunk only, swap file chunk has problems
* comment out code that maybe used later
* add debug mode to compare data read and write
* more efficient readResolvedChunks with linked list
* small optimization
* fix test compilation
* minor fix on writer
* add SeparateGarbageChunks
* group chunks into sections
* turn off debug mode
* fix tests
* fix tests
* tmp enable swap file chunk
* Revert "tmp enable swap file chunk"
This reverts commit 985137ec472924e4815f258189f6ca9f2168a0a7.
* simple refactoring
* simple refactoring
* do not re-use swap file chunk. Sealed chunks should not be re-used.
* comment out debugging facilities
* either mem chunk or swap file chunk is fine now
* remove orderedMutex as *semaphore.Weighted
not found to be impactful (see the embedded-mutex sketch after the diffstat)
* optimize size calculation for changing large files
* optimize performance to avoid going through the long list of chunks
* still problems with swap file chunk
* rename
* tiny optimization
* swap file chunk saves only successfully read data
* fix
* enable both mem and swap file chunk
* resolve chunks with range
* rename
* fix chunk interval list
* also change file handle chunk group when adding chunks
* pick inactive chunk with a time-decayed counter (a sketch of such a counter follows the diff below)
* fix compilation
* avoid nil with empty fh.entry
* refactoring
* rename
* rename
* refactor visible intervals to *list.List
* refactor chunkViews to *list.List
* add IntervalList for generic interval list (see the interval-list sketch after this list)
* change visible interval to use IntervalList in generics
* change chunkViews to *IntervalList[*ChunkView]
* use NewFileChunkSection to create
* rename variables
* refactor
* fix renaming leftover
* renaming
* renaming
* add insert interval
* interval list adds lock
* incrementally add chunks to readers
Fixes:
1. set start and stop offset for the value object
2. clone the value object
3. use pointer instead of copy-by-value when passing to interval.Value
4. use insert interval since chunks can be added out of order
* fix tests compilation
* fix tests compilation
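
Several of the items above ("add IntervalList for generic interval list", "add insert interval", "interval list adds lock", and fixes 2-4 under "incrementally add chunks to readers") revolve around one idea: a lock-guarded list of intervals, kept sorted by start offset, that tolerates out-of-order insertion. The following is a minimal self-contained sketch of that idea using Go generics and `container/list`; the names and structure are illustrative and simplified from the actual `weed/filer` implementation, which also splits and trims overlapping intervals by timestamp:

```go
package main

import (
	"container/list"
	"fmt"
	"sync"
)

// Interval covers the byte range [Start, Stop) of a file.
// Simplified for illustration; the real weed/filer types carry more state.
type Interval[T any] struct {
	Start, Stop int64
	TsNs        int64 // write timestamp: newer data wins on overlap
	Value       T
}

// IntervalList keeps *Interval[T] pointers sorted by Start behind a lock,
// so chunks arriving out of order can still be inserted correctly.
type IntervalList[T any] struct {
	mu    sync.Mutex
	items *list.List // elements are *Interval[T], sorted by Start
}

func NewIntervalList[T any]() *IntervalList[T] {
	return &IntervalList[T]{items: list.New()}
}

// InsertInterval places iv at its sorted position by start offset.
// Splitting overlapping intervals by TsNs is omitted from this sketch.
func (l *IntervalList[T]) InsertInterval(iv *Interval[T]) {
	l.mu.Lock()
	defer l.mu.Unlock()
	for e := l.items.Front(); e != nil; e = e.Next() {
		if iv.Start < e.Value.(*Interval[T]).Start {
			l.items.InsertBefore(iv, e)
			return
		}
	}
	l.items.PushBack(iv)
}

func main() {
	l := NewIntervalList[string]()
	l.InsertInterval(&Interval[string]{Start: 8, Stop: 16, TsNs: 2, Value: "b"})
	l.InsertInterval(&Interval[string]{Start: 0, Stop: 8, TsNs: 1, Value: "a"}) // out of order
	for e := l.items.Front(); e != nil; e = e.Next() {
		iv := e.Value.(*Interval[string])
		fmt.Printf("[%d,%d) %s\n", iv.Start, iv.Stop, iv.Value)
	}
}
```

Storing `*Interval[T]` pointers rather than values mirrors fix 3 in the notes above: a mutation made through a list element must affect the stored interval itself, not a copy.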
Diffstat (limited to 'weed/mount/filehandle.go')
| -rw-r--r-- | weed/mount/filehandle.go | 109 |
1 file changed, 52 insertions(+), 57 deletions(-)
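
One structural change worth calling out before the diff: the `orderedMutex *semaphore.Weighted` field (a semaphore acquired with effectively unlimited weight) is replaced by an embedded `sync.Mutex`, making the `FileHandle` itself lockable. A minimal sketch of the embedded-mutex pattern, with illustrative names rather than the real struct:

```go
package main

import "sync"

// Embedding sync.Mutex makes the handle itself lockable via
// fh.Lock()/fh.Unlock(), with no separate semaphore field to manage.
type fileHandle struct {
	sync.Mutex
	counter int64
}

func (fh *fileHandle) incrementCounter() {
	fh.Lock()
	defer fh.Unlock()
	fh.counter++
}

func main() {
	fh := &fileHandle{}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fh.incrementCounter()
		}()
	}
	wg.Wait()
	println(fh.counter) // always 10
}
```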
```diff
diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go
index b6ec3d2da..67298b047 100644
--- a/weed/mount/filehandle.go
+++ b/weed/mount/filehandle.go
@@ -5,50 +5,60 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
-	"golang.org/x/exp/slices"
-	"golang.org/x/sync/semaphore"
-	"math"
+	"os"
 	"sync"
 )
 
 type FileHandleId uint64
 
+var IsDebugFileReadWrite = false
+
 type FileHandle struct {
-	fh        FileHandleId
-	counter   int64
-	entry     *LockedEntry
-	entryLock sync.Mutex
-	inode     uint64
-	wfs       *WFS
+	fh              FileHandleId
+	counter         int64
+	entry           *LockedEntry
+	entryLock       sync.Mutex
+	entryChunkGroup *filer.ChunkGroup
+	inode           uint64
+	wfs             *WFS
 
 	// cache file has been written to
-	dirtyMetadata  bool
-	dirtyPages     *PageWriter
-	entryViewCache []filer.VisibleInterval
-	reader         *filer.ChunkReadAt
-	contentType    string
-	handle         uint64
-	orderedMutex   *semaphore.Weighted
+	dirtyMetadata bool
+	dirtyPages    *PageWriter
+	reader        *filer.ChunkReadAt
+	contentType   string
+	handle        uint64
+	sync.Mutex
 
 	isDeleted bool
+
+	// for debugging
+	mirrorFile *os.File
 }
 
 func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle {
 	fh := &FileHandle{
-		fh:           handleId,
-		counter:      1,
-		inode:        inode,
-		wfs:          wfs,
-		orderedMutex: semaphore.NewWeighted(int64(math.MaxInt64)),
+		fh:      handleId,
+		counter: 1,
+		inode:   inode,
+		wfs:     wfs,
 	}
 	// dirtyPages: newContinuousDirtyPages(file, writeOnly),
 	fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
-	if entry != nil {
-		entry.Attributes.FileSize = filer.FileSize(entry)
-	}
 	fh.entry = &LockedEntry{
 		Entry: entry,
 	}
+	if entry != nil {
+		fh.SetEntry(entry)
+	}
+
+	if IsDebugFileReadWrite {
+		var err error
+		fh.mirrorFile, err = os.OpenFile("/tmp/sw/"+entry.Name, os.O_RDWR|os.O_CREATE, 0600)
+		if err != nil {
+			println("failed to create mirror:", err.Error())
+		}
+	}
 
 	return fh
 }
@@ -63,6 +73,17 @@ func (fh *FileHandle) GetEntry() *filer_pb.Entry {
 }
 
 func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
+	if entry != nil {
+		fileSize := filer.FileSize(entry)
+		entry.Attributes.FileSize = fileSize
+		var resolveManifestErr error
+		fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks)
+		if resolveManifestErr != nil {
+			glog.Warningf("failed to resolve manifest chunks in %+v", entry)
+		}
+	} else {
+		glog.Fatalf("setting file handle entry to nil")
+	}
 	fh.entry.SetEntry(entry)
 }
 
@@ -78,43 +99,17 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 		return
 	}
 
-	// find the earliest incoming chunk
-	newChunks := chunks
-	earliestChunk := newChunks[0]
-	for i := 1; i < len(newChunks); i++ {
-		if lessThan(earliestChunk, newChunks[i]) {
-			earliestChunk = newChunks[i]
-		}
-	}
-
-	// pick out-of-order chunks from existing chunks
-	for _, chunk := range fh.entry.GetChunks() {
-		if lessThan(earliestChunk, chunk) {
-			chunks = append(chunks, chunk)
-		}
-	}
-
-	// sort incoming chunks
-	slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
-		return lessThan(a, b)
-	})
-
-	glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.GetChunks()), len(chunks))
-
-	fh.entry.AppendChunks(newChunks)
-	fh.entryViewCache = nil
+	fh.entry.AppendChunks(chunks)
 }
 
-func (fh *FileHandle) CloseReader() {
-	if fh.reader != nil {
-		_ = fh.reader.Close()
-		fh.reader = nil
-	}
-}
+func (fh *FileHandle) ReleaseHandle() {
+	fh.entryLock.Lock()
+	defer fh.entryLock.Unlock()
 
-func (fh *FileHandle) Release() {
 	fh.dirtyPages.Destroy()
-	fh.CloseReader()
+	if IsDebugFileReadWrite {
+		fh.mirrorFile.Close()
+	}
 }
 
 func lessThan(a, b *filer_pb.FileChunk) bool {
```
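
The "pick inactive chunk with a time-decayed counter" item refers to choosing which chunk to seal or swap out by favoring the least recently active one. Below is one way such an exponentially decayed counter can be written; the half-life constant and method names are assumptions for illustration, not code from this commit:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// decayedCounter is an exponentially time-decayed activity counter.
// The 10s half-life below is an assumed value, not the actual weed/mount setting.
type decayedCounter struct {
	score    float64
	lastTsNs int64
}

const halfLifeNs = float64(10 * time.Second) // assumed: score halves every 10s

// Touch decays the score for the elapsed time, then records one access.
func (c *decayedCounter) Touch(nowNs int64) {
	c.score = c.Score(nowNs) + 1
	c.lastTsNs = nowNs
}

// Score returns the decayed score as of nowNs without recording an access.
func (c *decayedCounter) Score(nowNs int64) float64 {
	if c.lastTsNs == 0 {
		return 0
	}
	return c.score * math.Exp2(-float64(nowNs-c.lastTsNs)/halfLifeNs)
}

func main() {
	now := time.Now().UnixNano()
	var hot, cold decayedCounter
	cold.Touch(now - int64(time.Minute)) // last active a minute ago
	hot.Touch(now)
	hot.Touch(now)

	// the chunk with the lowest score is the least active: seal or evict it first
	fmt.Printf("hot=%.3f cold=%.6f\n", hot.Score(now), cold.Score(now))
}
```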
