author    Chris Lu <chris.lu@gmail.com>  2020-04-13 21:58:10 -0700
committer Chris Lu <chris.lu@gmail.com>  2020-04-13 21:58:10 -0700
commit    f282ed444baf6676c22df1b7c35964dd73d2c04a (patch)
tree      69e594cba87ad2cba5303d02f77536916879aab8 /weed/util
parent    d8f5985e5e2a51719b7bf40a99c2728b475dbbf4 (diff)
download  seaweedfs-f282ed444baf6676c22df1b7c35964dd73d2c04a.tar.xz
          seaweedfs-f282ed444baf6676c22df1b7c35964dd73d2c04a.zip
refactoring
Diffstat (limited to 'weed/util')
-rw-r--r--  weed/util/chunk_cache/chunk_cache.go               | 86
-rw-r--r--  weed/util/chunk_cache/chunk_cache_on_disk_test.go  |  6
-rw-r--r--  weed/util/chunk_cache/on_disk_cache_layer.go       | 83
3 files changed, 112 insertions(+), 63 deletions(-)
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 7c4a77304..232e57a55 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -1,52 +1,39 @@
package chunk_cache
import (
- "fmt"
- "path"
- "sort"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
+const (
+ memCacheSizeLimit = 1024 * 1024
+)
+
// a global cache for recently accessed file chunks
type ChunkCache struct {
- memCache *ChunkCacheInMemory
- diskCaches []*ChunkCacheVolume
+ memCache *ChunkCacheInMemory
+ diskCache *OnDiskCacheLayer
sync.RWMutex
}
func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64, segmentCount int) *ChunkCache {
- c := &ChunkCache{
- memCache: NewChunkCacheInMemory(maxEntries),
- }
volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
if volumeCount < segmentCount {
volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
}
- for i := 0; i < volumeCount; i++ {
- fileName := path.Join(dir, fmt.Sprintf("cache_%d", i))
- diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
- if err != nil {
- glog.Errorf("failed to add cache %s : %v", fileName, err)
- } else {
- c.diskCaches = append(c.diskCaches, diskCache)
- }
+ c := &ChunkCache{
+ memCache: NewChunkCacheInMemory(maxEntries),
+ diskCache: NewOnDiskCacheLayer(dir, "cache", volumeCount, volumeSize),
}
- // keep newest cache to the front
- sort.Slice(c.diskCaches, func(i, j int) bool {
- return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
- })
-
return c
}
-func (c *ChunkCache) GetChunk(fileId string) (data []byte) {
+func (c *ChunkCache) GetChunk(fileId string, chunkSize uint64) (data []byte) {
if c == nil {
return
}
@@ -54,12 +41,15 @@ func (c *ChunkCache) GetChunk(fileId string) (data []byte) {
c.RLock()
defer c.RUnlock()
- return c.doGetChunk(fileId)
+ return c.doGetChunk(fileId, chunkSize)
}
-func (c *ChunkCache) doGetChunk(fileId string) (data []byte) {
- if data = c.memCache.GetChunk(fileId); data != nil {
- return data
+func (c *ChunkCache) doGetChunk(fileId string, chunkSize uint64) (data []byte) {
+
+ if chunkSize < memCacheSizeLimit {
+ if data = c.memCache.GetChunk(fileId); data != nil {
+ return data
+ }
}
fid, err := needle.ParseFileIdFromString(fileId)
@@ -67,20 +57,9 @@ func (c *ChunkCache) doGetChunk(fileId string) (data []byte) {
glog.Errorf("failed to parse file id %s", fileId)
return nil
}
- for _, diskCache := range c.diskCaches {
- data, err = diskCache.GetNeedle(fid.Key)
- if err == storage.ErrorNotFound {
- continue
- }
- if err != nil {
- glog.Errorf("failed to read cache file %s id %s", diskCache.fileName, fileId)
- continue
- }
- if len(data) != 0 {
- return
- }
- }
- return nil
+
+ return c.diskCache.getChunk(fid.Key)
+
}
func (c *ChunkCache) SetChunk(fileId string, data []byte) {
@@ -95,22 +74,8 @@ func (c *ChunkCache) SetChunk(fileId string, data []byte) {
func (c *ChunkCache) doSetChunk(fileId string, data []byte) {
- c.memCache.SetChunk(fileId, data)
-
- if len(c.diskCaches) == 0 {
- return
- }
-
- if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
- t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
- if resetErr != nil {
- glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
- return
- }
- for i := len(c.diskCaches) - 1; i > 0; i-- {
- c.diskCaches[i] = c.diskCaches[i-1]
- }
- c.diskCaches[0] = t
+ if len(data) < memCacheSizeLimit {
+ c.memCache.SetChunk(fileId, data)
}
fid, err := needle.ParseFileIdFromString(fileId)
@@ -118,7 +83,8 @@ func (c *ChunkCache) doSetChunk(fileId string, data []byte) {
glog.Errorf("failed to parse file id %s", fileId)
return
}
- c.diskCaches[0].WriteNeedle(fid.Key, data)
+
+ c.diskCache.setChunk(fid.Key, data)
}
@@ -128,7 +94,5 @@ func (c *ChunkCache) Shutdown() {
}
c.Lock()
defer c.Unlock()
- for _, diskCache := range c.diskCaches {
- diskCache.Shutdown()
- }
+ c.diskCache.shutdown()
}
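
The net effect of the chunk_cache.go changes: both reads and writes are now gated by memCacheSizeLimit (1 MiB), so only small chunks occupy the in-memory tier, while every chunk goes through the new OnDiskCacheLayer. Below is a minimal usage sketch, not part of the commit; it assumes the package import path from this repo, and the temp directory and file ID (which follows the test's "1,%daabbccdd" format) are made up for illustration.

    package main

    import (
    	"io/ioutil"
    	"os"

    	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
    )

    func main() {
    	dir, _ := ioutil.TempDir("", "chunk_cache")
    	defer os.RemoveAll(dir)

    	// Sizing: volumes default to 30000 MB each, so diskSizeMB = 1024
    	// gives int(1024/30000) = 0 volumes; that is below segmentCount = 4,
    	// so the constructor uses 4 volumes of 1024/4 = 256 MB instead.
    	cache := chunk_cache.NewChunkCache(1000, dir, 1024, 4)
    	defer cache.Shutdown()

    	small := make([]byte, 512*1024) // under memCacheSizeLimit (1 MiB)
    	cache.SetChunk("1,1aabbccdd", small)                  // memory + disk tiers
    	_ = cache.GetChunk("1,1aabbccdd", uint64(len(small))) // memory tier hit
    }

Note the GetChunk signature change: callers now pass the chunk size, so the cache can skip the memory-tier lookup entirely for chunks that could never have been stored there.
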
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk_test.go b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
index f93daf5a7..63bcba2be 100644
--- a/weed/util/chunk_cache/chunk_cache_on_disk_test.go
+++ b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
@@ -23,6 +23,7 @@ func TestOnDisk(t *testing.T) {
type test_data struct {
data []byte
fileId string
+ size uint64
}
testData := make([]*test_data, writeCount)
for i := 0; i < writeCount; i++ {
@@ -31,12 +32,13 @@ func TestOnDisk(t *testing.T) {
testData[i] = &test_data{
data: buff,
fileId: fmt.Sprintf("1,%daabbccdd", i+1),
+ size: uint64(len(buff)),
}
cache.SetChunk(testData[i].fileId, testData[i].data)
}
for i := 0; i < writeCount; i++ {
- data := cache.GetChunk(testData[i].fileId)
+ data := cache.GetChunk(testData[i].fileId, testData[i].size)
if bytes.Compare(data, testData[i].data) != 0 {
t.Errorf("failed to write to and read from cache: %d", i)
}
@@ -47,7 +49,7 @@ func TestOnDisk(t *testing.T) {
cache = NewChunkCache(0, tmpDir, totalDiskSizeMb, segmentCount)
for i := 0; i < writeCount; i++ {
- data := cache.GetChunk(testData[i].fileId)
+ data := cache.GetChunk(testData[i].fileId, testData[i].size)
if bytes.Compare(data, testData[i].data) != 0 {
t.Errorf("failed to write to and read from cache: %d", i)
}
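
The second half of the test is worth calling out: it rebuilds the cache over the same tmpDir and still expects hits, i.e. the disk tier is persistent across restarts. A hedged, self-contained sketch of that restart behavior (directory, sizes, and file ID chosen arbitrarily):

    package main

    import (
    	"bytes"
    	"fmt"
    	"io/ioutil"
    	"math/rand"
    	"os"

    	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
    )

    func main() {
    	tmpDir, _ := ioutil.TempDir("", "c")
    	defer os.RemoveAll(tmpDir)

    	data := make([]byte, 1024)
    	rand.Read(data)

    	// First "process": write a chunk, then shut the cache down cleanly.
    	cache := chunk_cache.NewChunkCache(0, tmpDir, 30, 2)
    	cache.SetChunk("1,1aabbccdd", data)
    	cache.Shutdown()

    	// "Restart": a new cache over the same directory still finds the
    	// chunk, because the on-disk volume files persist.
    	cache = chunk_cache.NewChunkCache(0, tmpDir, 30, 2)
    	fmt.Println(bytes.Equal(cache.GetChunk("1,1aabbccdd", uint64(len(data))), data))
    	cache.Shutdown()
    }
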
diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go
new file mode 100644
index 000000000..065188ac3
--- /dev/null
+++ b/weed/util/chunk_cache/on_disk_cache_layer.go
@@ -0,0 +1,83 @@
+package chunk_cache
+
+import (
+ "fmt"
+ "path"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+type OnDiskCacheLayer struct {
+ diskCaches []*ChunkCacheVolume
+}
+
+func NewOnDiskCacheLayer(dir, namePrefix string, volumeCount int, volumeSize int64) *OnDiskCacheLayer{
+ c := &OnDiskCacheLayer{}
+ for i := 0; i < volumeCount; i++ {
+ fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
+ diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
+ if err != nil {
+ glog.Errorf("failed to add cache %s : %v", fileName, err)
+ } else {
+ c.diskCaches = append(c.diskCaches, diskCache)
+ }
+ }
+
+ // keep newest cache to the front
+ sort.Slice(c.diskCaches, func(i, j int) bool {
+ return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
+ })
+
+ return c
+}
+
+func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
+
+ if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
+ t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
+ if resetErr != nil {
+ glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
+ return
+ }
+ for i := len(c.diskCaches) - 1; i > 0; i-- {
+ c.diskCaches[i] = c.diskCaches[i-1]
+ }
+ c.diskCaches[0] = t
+ }
+
+ c.diskCaches[0].WriteNeedle(needleId, data)
+
+}
+
+func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte){
+
+ var err error
+
+ for _, diskCache := range c.diskCaches {
+ data, err = diskCache.GetNeedle(needleId)
+ if err == storage.ErrorNotFound {
+ continue
+ }
+ if err != nil {
+ glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+ continue
+ }
+ if len(data) != 0 {
+ return
+ }
+ }
+
+ return nil
+
+}
+
+func (c *OnDiskCacheLayer) shutdown(){
+
+ for _, diskCache := range c.diskCaches {
+ diskCache.Shutdown()
+ }
+
+}
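
setChunk's rotation is the part that is easiest to misread: when the front (newest) volume would overflow, the oldest (last) volume is Reset and promoted to the front, and every other volume shifts back one slot. A toy model of just that shuffle, detached from any I/O, with string labels standing in for *ChunkCacheVolume:

    package main

    import "fmt"

    // rotate mirrors the setChunk shuffle: reset the last (oldest) volume,
    // shift the rest back one slot, and put the reset volume at the front.
    func rotate(vols []string) []string {
    	reset := vols[len(vols)-1] + "(reset)"
    	for i := len(vols) - 1; i > 0; i-- {
    		vols[i] = vols[i-1]
    	}
    	vols[0] = reset
    	return vols
    }

    func main() {
    	vols := []string{"v0", "v1", "v2"}
    	fmt.Println(rotate(vols)) // [v2(reset) v0 v1]
    }

One observable difference from the old doSetChunk: the extracted setChunk indexes c.diskCaches[0] without the former len(c.diskCaches) == 0 guard, so it relies on at least one volume having been created successfully in NewOnDiskCacheLayer.
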