path: root/weed/filer/reader_cache.go
author    Chris Lu <chrislusf@users.noreply.github.com>    2023-01-02 23:20:45 -0800
committer GitHub <noreply@github.com>                      2023-01-02 23:20:45 -0800
commit    d4566d4aaa426b33015780c7cc18f887fc07cca4 (patch)
tree      7c3b5cb3d9e54297b9d4213b67408f86149013f7 /weed/filer/reader_cache.go
parent    367353b936c450906e88e850c7d1e804f97c3560 (diff)
more solid weed mount (#4089)
* compare chunks by timestamp
* fix slab clearing error
* fix test compilation
* move oldest chunk to sealed, instead of by fullness
* lock on fh.entryViewCache
* remove verbose logs
* revert slab clearing
* less logs
* less logs
* track write and read by timestamp
* remove useless logic
* add entry lock on file handle release
* use mem chunk only, swap file chunk has problems
* comment out code that may be used later
* add debug mode to compare data read and write
* more efficient readResolvedChunks with linked list
* small optimization
* fix test compilation
* minor fix on writer
* add SeparateGarbageChunks
* group chunks into sections
* turn off debug mode
* fix tests
* fix tests
* tmp enable swap file chunk
* Revert "tmp enable swap file chunk". This reverts commit 985137ec472924e4815f258189f6ca9f2168a0a7.
* simple refactoring
* simple refactoring
* do not re-use swap file chunk. Sealed chunks should not be re-used.
* comment out debugging facilities
* either mem chunk or swap file chunk is fine now
* remove orderedMutex (*semaphore.Weighted) as it was not found impactful
* optimize size calculation for changing large files
* optimize performance to avoid going through the long list of chunks
* still problems with swap file chunk
* rename
* tiny optimization
* swap file chunk saves only successfully read data
* fix
* enable both mem and swap file chunk
* resolve chunks with range
* rename
* fix chunk interval list
* also change file handle chunk group when adding chunks
* pick inactive chunk with time-decayed counter
* fix compilation
* avoid nil with empty fh.entry
* refactoring
* rename
* rename
* refactor visible intervals to *list.List
* refactor chunkViews to *list.List
* add IntervalList for generic interval list (a sketch follows this list)
* change visible interval to use IntervalList in generics
* change chunkViews to *IntervalList[*ChunkView]
* use NewFileChunkSection to create
* rename variables
* refactor
* fix renaming leftover
* renaming
* renaming
* add insert interval
* interval list adds lock
* incrementally add chunks to readers. Fixes: 1. set start and stop offsets for the value object 2. clone the value object 3. use a pointer instead of copy-by-value when passing to interval.Value 4. use insert interval since chunks could be added out of order
* fix test compilation
* fix test compilation
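The commit message above mentions a generic IntervalList with locked insertion. Below is a minimal sketch of such a singly linked interval list, assuming Go generics; only the Interval type name and its Value and Next fields appear in the diff further down, while StartOffset, StopOffset, the IntervalList wrapper, and the Insert/Front helpers are illustrative guesses rather than SeaweedFS's actual implementation.

package filer

import "sync"

// Interval is one node in a singly linked list of values ordered by offset.
// Only Value and Next are visible in the diff below; the offset fields are assumed.
type Interval[T any] struct {
	StartOffset int64
	StopOffset  int64
	Value       T
	Next        *Interval[T]
}

// IntervalList keeps intervals sorted by StartOffset and guards insertion with a
// lock, since chunks can be appended concurrently and out of order.
type IntervalList[T any] struct {
	head *Interval[T]
	lock sync.Mutex
}

// Insert places a new interval at its sorted position by StartOffset.
func (l *IntervalList[T]) Insert(start, stop int64, v T) {
	l.lock.Lock()
	defer l.lock.Unlock()
	node := &Interval[T]{StartOffset: start, StopOffset: stop, Value: v}
	if l.head == nil || start < l.head.StartOffset {
		node.Next = l.head
		l.head = node
		return
	}
	prev := l.head
	for prev.Next != nil && prev.Next.StartOffset <= start {
		prev = prev.Next
	}
	node.Next = prev.Next
	prev.Next = node
}

// Front returns the first interval; callers walk the list through Next.
func (l *IntervalList[T]) Front() *Interval[T] {
	return l.head
}

Sorted insertion is what the "use insert interval since chunks could be added out of order" item above relies on: readers can then traverse a consistent, offset-ordered view without re-sorting.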
Diffstat (limited to 'weed/filer/reader_cache.go')
-rw-r--r--  weed/filer/reader_cache.go  7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go
index cb89c03c5..0a7c83de7 100644
--- a/weed/filer/reader_cache.go
+++ b/weed/filer/reader_cache.go
@@ -43,7 +43,7 @@ func newReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn
}
}
-func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) {
+func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {
if rc.lookupFileIdFn == nil {
return
}
@@ -55,7 +55,8 @@ func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) {
return
}
- for _, chunkView := range chunkViews {
+ for x := chunkViews; x != nil; x = x.Next {
+ chunkView := x.Value
if _, found := rc.downloaders[chunkView.FileId]; found {
continue
}
@@ -65,7 +66,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) {
return
}
- // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.LogicOffset)
+ // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
// cache this chunk if not yet
cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false)
go cacher.startCaching()
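For context on the hunks above: MaybeCache now receives the head of a linked list and walks it via Next instead of ranging over a slice. The sketch below is a hypothetical usage example, not SeaweedFS code; the ChunkView struct is trimmed to the fields this diff references (FileId, ViewOffset, ChunkSize, CipherKey, IsGzipped), the printChunks helper is invented, and the Interval type is the one from the sketch after the commit message.

package filer

import "fmt"

// ChunkView is trimmed to the fields referenced in this diff; the real struct has more.
type ChunkView struct {
	FileId     string
	ViewOffset int64
	ChunkSize  uint64
	CipherKey  []byte
	IsGzipped  bool
}

// printChunks walks the linked list the same way the new MaybeCache does:
// start at the head and follow Next until nil, reading each node's Value.
func printChunks(chunkViews *Interval[*ChunkView]) {
	for x := chunkViews; x != nil; x = x.Next {
		chunkView := x.Value
		fmt.Printf("chunk %s at offset %d, size %d\n",
			chunkView.FileId, chunkView.ViewOffset, chunkView.ChunkSize)
	}
}

One apparent upside of passing the list head instead of a []*ChunkView slice is that any node is itself a valid head, so a caller can hand over a sub-range of an existing list without building a new slice.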