diff options
| author | Mike Tolman <mike.tolman@fidelissecurity.com> | 2016-08-05 15:14:24 -0600 |
|---|---|---|
| committer | Mike Tolman <mike.tolman@fidelissecurity.com> | 2016-08-05 15:14:24 -0600 |
| commit | 87fee21ef597a8b1bac5352d1327c13f87eeb000 (patch) | |
| tree | 49c3352402290a71839781713b423e0cb1e91c9e | |
| parent | 761ef1c73ec2829d56acef1f12696e20722cdf44 (diff) | |
| download | seaweedfs-87fee21ef597a8b1bac5352d1327c13f87eeb000.tar.xz seaweedfs-87fee21ef597a8b1bac5352d1327c13f87eeb000.zip | |
Changing needle_byte_cache so that it doesn't grow so big when larger files are added.
| -rw-r--r-- | weed/storage/needle_byte_cache.go | 22 |
1 file changed, 19 insertions, 3 deletions
diff --git a/weed/storage/needle_byte_cache.go b/weed/storage/needle_byte_cache.go index ae35a48ba..930ead81d 100644 --- a/weed/storage/needle_byte_cache.go +++ b/weed/storage/needle_byte_cache.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/golang-lru" "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( @@ -24,7 +25,7 @@ In caching, the string~[]byte mapping is cached */ func init() { bytesPool = util.NewBytesPool() - bytesCache, _ = lru.NewWithEvict(512, func(key interface{}, value interface{}) { + bytesCache, _ = lru.NewWithEvict(50, func(key interface{}, value interface{}) { value.(*Block).decreaseReference() }) } @@ -46,22 +47,37 @@ func (block *Block) increaseReference() { // get bytes from the LRU cache of []byte first, then from the bytes pool // when []byte in LRU cache is evicted, it will be put back to the bytes pool func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) { + //Skip the cache if we are looking for a block that is too big to fit in the cache (defaulting to 10MB) + cacheable := readSize <= (1024*1024*10) + if !cacheable { + glog.V(4).Infoln("Block too big to keep in cache. Size:", readSize) + } + cacheKey := string("") + if cacheable { // check cache, return if found - cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize) + cacheKey = fmt.Sprintf("%d:%d:%d", r.Fd(), offset >> 3, readSize) if obj, found := bytesCache.Get(cacheKey); found { + glog.V(4).Infoln("Found block in cache. Size:", readSize) block = obj.(*Block) block.increaseReference() dataSlice = block.Bytes[0:readSize] return dataSlice, block, nil + } } // get the []byte from pool b := bytesPool.Get(readSize) // refCount = 2, one by the bytesCache, one by the actual needle object - block = &Block{Bytes: b, refCount: 2} + refCount := int32(1) + if cacheable { + refCount = 2 + } + block = &Block{Bytes: b, refCount: refCount} dataSlice = block.Bytes[0:readSize] _, err = r.ReadAt(dataSlice, offset) + if cacheable { bytesCache.Add(cacheKey, block) + } return dataSlice, block, err } |
