From 31fc7bb2e1fa6085e0d9f3309e8ec54641e1f70c Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Sun, 27 Sep 2020 10:41:29 -0700
Subject: refactor

adjust for faster test
---
 weed/util/chunk_cache/chunk_cache.go | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

(limited to 'weed/util/chunk_cache/chunk_cache.go')

diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 2b0c635a1..b4687d037 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -7,12 +7,6 @@ import (
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
-const (
-    memCacheSizeLimit     = 1024 * 1024
-    onDiskCacheSizeLimit0 = memCacheSizeLimit
-    onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
-)
-
 type ChunkCache interface {
     GetChunk(fileId string, minSize uint64) (data []byte)
     SetChunk(fileId string, data []byte)
@@ -23,17 +17,21 @@ type TieredChunkCache struct {
     memCache   *ChunkCacheInMemory
     diskCaches []*OnDiskCacheLayer
     sync.RWMutex
+    onDiskCacheSizeLimit0 uint64
+    onDiskCacheSizeLimit1 uint64
 }
 
-func NewTieredChunkCache(maxEntries int64, dir string, diskSizeMB int64) *TieredChunkCache {
+func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
 
     c := &TieredChunkCache{
         memCache: NewChunkCacheInMemory(maxEntries),
     }
     c.diskCaches = make([]*OnDiskCacheLayer, 3)
-    c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
-    c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
-    c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
+    c.onDiskCacheSizeLimit0 = uint64(unitSize)
+    c.onDiskCacheSizeLimit1 = 4 * c.onDiskCacheSizeLimit0
+    c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeInUnit*unitSize/4, 4)
+    c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeInUnit*unitSize/4, 4)
+    c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeInUnit*unitSize/2, 4)
 
     return c
 }
@@ -51,7 +49,7 @@ func (c *TieredChunkCache) GetChunk(fileId string, minSize uint64) (data []byte)
 
 func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byte) {
 
-    if minSize < memCacheSizeLimit {
+    if minSize < c.onDiskCacheSizeLimit0 {
         data = c.memCache.GetChunk(fileId)
         if len(data) >= int(minSize) {
             return data
@@ -64,13 +62,13 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
         return nil
     }
 
-    if minSize < onDiskCacheSizeLimit0 {
+    if minSize < c.onDiskCacheSizeLimit0 {
         data = c.diskCaches[0].getChunk(fid.Key)
         if len(data) >= int(minSize) {
             return data
         }
     }
-    if minSize < onDiskCacheSizeLimit1 {
+    if minSize < c.onDiskCacheSizeLimit1 {
         data = c.diskCaches[1].getChunk(fid.Key)
         if len(data) >= int(minSize) {
             return data
@@ -101,7 +99,7 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) {
 
 func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
 
-    if len(data) < memCacheSizeLimit {
+    if len(data) < int(c.onDiskCacheSizeLimit0) {
         c.memCache.SetChunk(fileId, data)
     }
 
@@ -111,9 +109,9 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
         return
     }
 
-    if len(data) < onDiskCacheSizeLimit0 {
+    if len(data) < int(c.onDiskCacheSizeLimit0) {
         c.diskCaches[0].setChunk(fid.Key, data)
-    } else if len(data) < onDiskCacheSizeLimit1 {
+    } else if len(data) < int(c.onDiskCacheSizeLimit1) {
         c.diskCaches[1].setChunk(fid.Key, data)
     } else {
         c.diskCaches[2].setChunk(fid.Key, data)
--
cgit v1.2.3
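To make the call-site impact of this refactor concrete, here is a minimal usage sketch of the new constructor signature. It is illustrative only: the import path and package name are inferred from the file's directory (weed/util/chunk_cache), and the directory, entry counts, and sizes are assumptions rather than values from the repository.

// Illustrative sketch, not part of the patch: constructing the cache with the
// parameterized unit size introduced above. Path, directory, and sizes are assumed.
package main

import (
    chunk_cache "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)

func main() {
    dir := "/tmp/seaweedfs_chunk_cache" // hypothetical writable directory

    // A cache sized as 100 units of 1 MB. Per doSetChunk above, chunks below
    // unitSize go to memory and disk layer 0, below 4*unitSize to layer 1,
    // and everything else to layer 2.
    cache := chunk_cache.NewTieredChunkCache(1000, dir, 100, 1024*1024)

    // A tiny unit keeps every layer small so it fills and rolls over quickly,
    // which is the "adjust for faster test" motivation in the commit message.
    testCache := chunk_cache.NewTieredChunkCache(16, dir, 4, 1024)

    _ = cache
    _ = testCache
}

Before this patch the 1 MB threshold was the compile-time constant memCacheSizeLimit; now the threshold is simply whatever unitSize the caller passes.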
From 9ad2dcca2b49b11e85c09df9ade18fb417e4e755 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Sun, 27 Sep 2020 11:42:51 -0700
Subject: more tests

---
 weed/util/chunk_cache/chunk_cache.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'weed/util/chunk_cache/chunk_cache.go')

diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index b4687d037..8830353cc 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -29,9 +29,9 @@ func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, uni
     c.diskCaches = make([]*OnDiskCacheLayer, 3)
     c.onDiskCacheSizeLimit0 = uint64(unitSize)
     c.onDiskCacheSizeLimit1 = 4 * c.onDiskCacheSizeLimit0
-    c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeInUnit*unitSize/4, 4)
-    c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeInUnit*unitSize/4, 4)
-    c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeInUnit*unitSize/2, 4)
+    c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_2", diskSizeInUnit*unitSize/8, 2)
+    c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_3", diskSizeInUnit*unitSize/4+diskSizeInUnit*unitSize/8, 3)
+    c.diskCaches[2] = NewOnDiskCacheLayer(dir, "c2_2", diskSizeInUnit*unitSize/2, 2)
 
     return c
 }
@@ -49,7 +49,7 @@ func (c *TieredChunkCache) GetChunk(fileId string, minSize uint64) (data []byte)
 
 func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byte) {
 
-    if minSize < c.onDiskCacheSizeLimit0 {
+    if minSize <= c.onDiskCacheSizeLimit0 {
         data = c.memCache.GetChunk(fileId)
         if len(data) >= int(minSize) {
             return data
@@ -62,13 +62,13 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
         return nil
     }
 
-    if minSize < c.onDiskCacheSizeLimit0 {
+    if minSize <= c.onDiskCacheSizeLimit0 {
         data = c.diskCaches[0].getChunk(fid.Key)
         if len(data) >= int(minSize) {
             return data
         }
     }
-    if minSize < c.onDiskCacheSizeLimit1 {
+    if minSize <= c.onDiskCacheSizeLimit1 {
         data = c.diskCaches[1].getChunk(fid.Key)
         if len(data) >= int(minSize) {
             return data
@@ -99,7 +99,7 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) {
 
 func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
 
-    if len(data) < int(c.onDiskCacheSizeLimit0) {
+    if len(data) <= int(c.onDiskCacheSizeLimit0) {
         c.memCache.SetChunk(fileId, data)
     }
 
@@ -109,9 +109,9 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
         return
     }
 
-    if len(data) < int(c.onDiskCacheSizeLimit0) {
+    if len(data) <= int(c.onDiskCacheSizeLimit0) {
         c.diskCaches[0].setChunk(fid.Key, data)
-    } else if len(data) < int(c.onDiskCacheSizeLimit1) {
+    } else if len(data) <= int(c.onDiskCacheSizeLimit1) {
         c.diskCaches[1].setChunk(fid.Key, data)
     } else {
         c.diskCaches[2].setChunk(fid.Key, data)
--
cgit v1.2.3
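After this commit the size boundaries are inclusive (<= instead of <) and everything derives from unitSize. The standalone sketch below only mirrors doSetChunk's branching to make the routing visible; the function name, labels, and sizes are illustrative, not part of the package.

// Standalone sketch, not part of the patch: mirrors the tier selection used by
// doSetChunk after this commit. Names and printed labels are made up.
package main

import "fmt"

// tierFor reports which cache layer a chunk of the given size would be written
// to, following the inclusive comparisons introduced in this commit.
func tierFor(dataSize, unitSize uint64) string {
    limit0 := unitSize     // onDiskCacheSizeLimit0
    limit1 := 4 * unitSize // onDiskCacheSizeLimit1
    switch {
    case dataSize <= limit0:
        return "memory + disk layer 0"
    case dataSize <= limit1:
        return "disk layer 1"
    default:
        return "disk layer 2"
    }
}

func main() {
    const unit = 1024 * 1024 // 1 MB, the pre-refactor default
    for _, size := range []uint64{unit, unit + 1, 4 * unit, 4*unit + 1} {
        fmt.Printf("%d bytes -> %s\n", size, tierFor(size, unit))
    }
}

With a 1 MB unit, a chunk of exactly 1 MB is now accepted by the memory tier and a 4 MB chunk still fits layer 1; only strictly larger chunks fall through.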
From 62ce85610e2fcd08488ee6026266e617509f6d46 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Sun, 27 Sep 2020 11:58:48 -0700
Subject: skip caching too large chunks

---
 weed/util/chunk_cache/chunk_cache.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'weed/util/chunk_cache/chunk_cache.go')

diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 8830353cc..608d605b1 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -19,6 +19,7 @@ type TieredChunkCache struct {
     sync.RWMutex
     onDiskCacheSizeLimit0 uint64
     onDiskCacheSizeLimit1 uint64
+    onDiskCacheSizeLimit2 uint64
 }
 
 func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
@@ -29,6 +30,7 @@ func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, uni
     c.diskCaches = make([]*OnDiskCacheLayer, 3)
     c.onDiskCacheSizeLimit0 = uint64(unitSize)
     c.onDiskCacheSizeLimit1 = 4 * c.onDiskCacheSizeLimit0
+    c.onDiskCacheSizeLimit2 = 2 * c.onDiskCacheSizeLimit1
     c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_2", diskSizeInUnit*unitSize/8, 2)
     c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_3", diskSizeInUnit*unitSize/4+diskSizeInUnit*unitSize/8, 3)
     c.diskCaches[2] = NewOnDiskCacheLayer(dir, "c2_2", diskSizeInUnit*unitSize/2, 2)
@@ -74,7 +76,7 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
             return data
         }
     }
-    {
+    if minSize <= c.onDiskCacheSizeLimit2 {
         data = c.diskCaches[2].getChunk(fid.Key)
         if len(data) >= int(minSize) {
             return data
@@ -113,7 +115,7 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
         c.diskCaches[0].setChunk(fid.Key, data)
     } else if len(data) <= int(c.onDiskCacheSizeLimit1) {
         c.diskCaches[1].setChunk(fid.Key, data)
-    } else {
+    } else if len(data) <= int(c.onDiskCacheSizeLimit2) {
         c.diskCaches[2].setChunk(fid.Key, data)
--
cgit v1.2.3

From 707936f48218b907a6222b392a0083e31dccf901 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Sat, 3 Oct 2020 14:12:38 -0700
Subject: re-enable caching larger than 16MB

revert https://github.com/chrislusf/seaweedfs/commit/62ce85610e2fcd08488ee6026266e617509f6d46
---
 weed/util/chunk_cache/chunk_cache.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'weed/util/chunk_cache/chunk_cache.go')

diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 608d605b1..3615aee0e 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -76,7 +76,7 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
             return data
         }
     }
-    if minSize <= c.onDiskCacheSizeLimit2 {
+    {
         data = c.diskCaches[2].getChunk(fid.Key)
         if len(data) >= int(minSize) {
             return data
@@ -115,7 +115,7 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
         c.diskCaches[0].setChunk(fid.Key, data)
     } else if len(data) <= int(c.onDiskCacheSizeLimit1) {
         c.diskCaches[1].setChunk(fid.Key, data)
-    } else if len(data) <= int(c.onDiskCacheSizeLimit2) {
+    } else {
         c.diskCaches[2].setChunk(fid.Key, data)
--
cgit v1.2.3
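The last commit reverts the onDiskCacheSizeLimit2 cap in the read and write paths, so anything too large for layers 0 and 1 is once again written to, and read from, the last disk layer without an upper bound, which is what the "re-enable caching larger than 16MB" title refers to. To close the loop, here is an illustrative end-to-end sketch of the API as it stands after these four commits; the import path, directory, file id, and sizes are assumptions for the example, and the directory is assumed to exist and be writable.

// Illustrative sketch, not part of the patches: storing and retrieving one chunk
// through the tiered cache. All concrete values here are made up.
package main

import (
    "bytes"
    "fmt"

    chunk_cache "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)

func main() {
    // Small unit so the example exercises the disk layers quickly.
    cache := chunk_cache.NewTieredChunkCache(16, "/tmp/chunk_cache_demo", 4, 1024)

    fileId := "3,01637037d6"                 // SeaweedFS-style file id (volume,key+cookie); made up
    chunk := bytes.Repeat([]byte{'x'}, 2048) // larger than unitSize, so it targets disk layer 1

    cache.SetChunk(fileId, chunk)

    // GetChunk returns nil when no cached copy of at least minSize bytes is found.
    if data := cache.GetChunk(fileId, uint64(len(chunk))); data != nil {
        fmt.Println("cache hit:", len(data), "bytes")
    } else {
        fmt.Println("cache miss")
    }
}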