author     chrislu <chris.lu@gmail.com>  2024-09-04 01:19:14 -0700
committer  chrislu <chris.lu@gmail.com>  2024-09-04 01:19:14 -0700
commit     7367b976b05bfa69158a60f205dec970c48f50f0 (patch)
tree       b2206ec7b05e34ed95c0255ceb6da5936efdc00b
parent     66ac82bb8f8a5c5f095387689a503f2bc0edd002 (diff)
weed mount, weed dav add option to force cache
-rw-r--r--  weed/command/mount.go                               2
-rw-r--r--  weed/command/mount_std.go                           1
-rw-r--r--  weed/command/webdav.go                              3
-rw-r--r--  weed/filer/reader_at.go                             5
-rw-r--r--  weed/filer/reader_cache.go                          4
-rw-r--r--  weed/mount/weedfs.go                                3
-rw-r--r--  weed/server/webdav_server.go                        3
-rw-r--r--  weed/util/chunk_cache/chunk_cache.go                6
-rw-r--r--  weed/util/chunk_cache/chunk_cache_on_disk_test.go   4
9 files changed, 22 insertions, 9 deletions
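
For context on the new option: judging by the -forceCache flag registrations in weed/command/mount.go and weed/command/webdav.go below, the flag is exposed on both commands with different defaults, so invocations would look roughly like (all other flags elided as "..."):

    weed mount ... -forceCache=false     # mount defaults to true; pass false to opt out
    weed webdav ... -forceCache=true     # webdav defaults to false; pass true to opt in

Only -forceCache itself is introduced by this commit; the elided flags are whatever weed mount and weed webdav already accept.
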
diff --git a/weed/command/mount.go b/weed/command/mount.go
index f5972fd30..6485d4c0e 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -20,6 +20,7 @@ type MountOptions struct {
cacheDirForRead *string
cacheDirForWrite *string
cacheSizeMBForRead *int64
+ forceCache *bool
dataCenter *string
allowOthers *bool
umaskString *string
@@ -58,6 +59,7 @@ func init() {
mountOptions.cacheDirForRead = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
mountOptions.cacheSizeMBForRead = cmdMount.Flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
mountOptions.cacheDirForWrite = cmdMount.Flag.String("cacheDirWrite", "", "buffer writes mostly for large files")
+ mountOptions.forceCache = cmdMount.Flag.Bool("forceCache", true, "force to cache all reads")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index f85e7386d..4508f520f 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -235,6 +235,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
CacheDirForRead: *option.cacheDirForRead,
CacheSizeMBForRead: *option.cacheSizeMBForRead,
CacheDirForWrite: cacheDirForWrite,
+ ForceCache: *option.forceCache,
DataCenter: *option.dataCenter,
Quota: int64(*option.collectionQuota) * 1024 * 1024,
MountUid: uid,
diff --git a/weed/command/webdav.go b/weed/command/webdav.go
index 1d1a43eda..b5f8a64d2 100644
--- a/weed/command/webdav.go
+++ b/weed/command/webdav.go
@@ -32,6 +32,7 @@ type WebDavOption struct {
tlsCertificate *string
cacheDir *string
cacheSizeMB *int64
+ forceCache *bool
maxMB *int
}
@@ -46,6 +47,7 @@ func init() {
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 0, "local cache capacity in MB")
+ webDavStandaloneOptions.forceCache = cmdWebDav.Flag.Bool("forceCache", false, "force to cache reads to local disk")
webDavStandaloneOptions.maxMB = cmdWebDav.Flag.Int("maxMB", 4, "split files larger than the limit")
webDavStandaloneOptions.filerRootPath = cmdWebDav.Flag.String("filer.path", "/", "use this remote path from filer server")
}
@@ -118,6 +120,7 @@ func (wo *WebDavOption) startWebDav() bool {
Cipher: cipher,
CacheDir: util.ResolvePath(*wo.cacheDir),
CacheSizeMB: *wo.cacheSizeMB,
+ ForceCache: *wo.forceCache,
MaxMB: *wo.maxMB,
})
if webdavServer_err != nil {
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index d475e6e11..3d9f50d0e 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -19,6 +19,7 @@ type ChunkReadAt struct {
fileSize int64
readerCache *ReaderCache
readerPattern *ReaderPattern
+ forceCache bool
lastChunkFid string
}
@@ -196,7 +197,9 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
if n > 0 {
return n, err
}
- return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
+ if !c.forceCache {
+ return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
+ }
}
n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
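
The reader_at.go hunk above is where forceCache changes read behavior: when a read misses the local cache in this path, the old code always fell back to a direct range fetch (fetchChunkRange); the new code does that only when forceCache is off, otherwise falling through to ReaderCache.ReadChunkAt, which downloads the whole chunk (and may cache it locally). A minimal Go sketch of that decision, where fetchRange and readViaCache are hypothetical stand-ins for fetchChunkRange and ReaderCache.ReadChunkAt, not SeaweedFS APIs:

    package main

    import "fmt"

    // readChunk sketches the cache-miss branch added to ChunkReadAt.readChunkSliceAt:
    // with forceCache off, a miss is served by a direct range fetch; with it on,
    // the read goes through the caching reader instead.
    func readChunk(buffer []byte, offset int64, forceCache bool,
        fetchRange func([]byte, int64) (int, error),
        readViaCache func([]byte, int64) (int, error)) (int, error) {
        if !forceCache {
            // direct ranged read; nothing is written to the local chunk cache
            return fetchRange(buffer, offset)
        }
        // read through the caching layer, which fetches the whole chunk
        return readViaCache(buffer, offset)
    }

    func main() {
        fetch := func(b []byte, _ int64) (int, error) { return copy(b, "ranged"), nil }
        viaCache := func(b []byte, _ int64) (int, error) { return copy(b, "cached"), nil }

        buf := make([]byte, 6)
        n, _ := readChunk(buf, 0, false, fetch, viaCache)
        fmt.Println(string(buf[:n])) // prints "ranged"
        n, _ = readChunk(buf, 0, true, fetch, viaCache)
        fmt.Println(string(buf[:n])) // prints "cached"
    }
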
diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go
index fea2bbc89..a3df8e0bd 100644
--- a/weed/filer/reader_cache.go
+++ b/weed/filer/reader_cache.go
@@ -7,9 +7,9 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/util/mem"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
- util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
type ReaderCache struct {
@@ -69,7 +69,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {
// glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
// cache this chunk if not yet
- cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false)
+ cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
go cacher.startCaching()
<-cacher.cacheStartedCh
rc.downloaders[chunkView.FileId] = cacher
diff --git a/weed/mount/weedfs.go b/weed/mount/weedfs.go
index a9fbd9380..181340ae3 100644
--- a/weed/mount/weedfs.go
+++ b/weed/mount/weedfs.go
@@ -41,6 +41,7 @@ type Option struct {
CacheDirForRead string
CacheSizeMBForRead int64
CacheDirForWrite string
+ ForceCache bool
DataCenter string
Umask os.FileMode
Quota int64
@@ -95,7 +96,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
wfs.option.filerIndex = int32(rand.Intn(len(option.FilerAddresses)))
wfs.option.setupUniqueCacheDirectory()
if option.CacheSizeMBForRead > 0 {
- wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024)
+ wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024, option.ForceCache)
}
wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDirForRead(), "meta"), option.UidGidMapper,
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index dbe6dfed5..d8aa6a640 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -38,6 +38,7 @@ type WebDavOption struct {
Cipher bool
CacheDir string
CacheSizeMB int64
+ ForceCache bool
MaxMB int
}
@@ -133,7 +134,7 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
cacheDir := path.Join(option.CacheDir, cacheUniqueId)
os.MkdirAll(cacheDir, os.FileMode(0755))
- chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
+ chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024, option.ForceCache)
t := &WebDavFileSystem{
option: option,
chunkCache: chunkCache,
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 158f47cfc..7ed009b45 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -23,14 +23,16 @@ type TieredChunkCache struct {
onDiskCacheSizeLimit0 uint64
onDiskCacheSizeLimit1 uint64
onDiskCacheSizeLimit2 uint64
+ forceCache bool
}
var _ ChunkCache = &TieredChunkCache{}
-func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
+func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64, forceCache bool) *TieredChunkCache {
c := &TieredChunkCache{
- memCache: NewChunkCacheInMemory(maxEntries),
+ memCache:   NewChunkCacheInMemory(maxEntries),
+ forceCache: forceCache,
}
c.diskCaches = make([]*OnDiskCacheLayer, 3)
c.onDiskCacheSizeLimit0 = uint64(unitSize)
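
Since NewTieredChunkCache now takes a fifth forceCache argument, every caller must be updated, as the mount, WebDAV, and test changes in this commit show. A minimal sketch of the new constructor signature in use, with placeholder directory and size values rather than anything taken from the code base:

    package main

    import (
        "os"

        "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
    )

    func main() {
        // Placeholder values for illustration only.
        cacheDir := "/tmp/weed-chunk-cache"
        cacheSizeMB := int64(1024)
        os.MkdirAll(cacheDir, 0755)

        // The trailing bool is the forceCache parameter added by this commit;
        // the other arguments mirror the call sites in weedfs.go and webdav_server.go.
        cache := chunk_cache.NewTieredChunkCache(256, cacheDir, cacheSizeMB, 1024*1024, true)
        defer cache.Shutdown()
    }
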
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk_test.go b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
index 14179beaa..8a517b486 100644
--- a/weed/util/chunk_cache/chunk_cache_on_disk_test.go
+++ b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
@@ -13,7 +13,7 @@ func TestOnDisk(t *testing.T) {
totalDiskSizeInKB := int64(32)
- cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)
+ cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024, false)
writeCount := 5
type test_data struct {
@@ -61,7 +61,7 @@ func TestOnDisk(t *testing.T) {
cache.Shutdown()
- cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)
+ cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024, false)
for i := 0; i < 2; i++ {
data := mem.Allocate(testData[i].size)