author     Guo Lei <snipergg@163.com>     2022-11-14 16:19:27 +0800
committer  GitHub <noreply@github.com>    2022-11-14 00:19:27 -0800
commit     5b905fb2b74049437fcd939c0304a7e34d051234 (patch)
tree       2c654eb74ee30f5c9e20a6c26de4ede1d04543d9 /weed/command
parent     d90aa31d5f5b846c13bbeabfbd7d89260c21020c (diff)
download   seaweedfs-5b905fb2b74049437fcd939c0304a7e34d051234.tar.xz
           seaweedfs-5b905fb2b74049437fcd939c0304a7e34d051234.zip
Lazy loading (#3958)
* types package is imported more than once
* lazy-loading
* fix bugs
* fix bugs
* fix unit tests
* fix test error
* rename function
* unload ldb after initial startup
* Don't load ldb when starting volume server if ldbTimeout is set.
* remove unnecessary unloadldb
* Update weed/command/server.go
* Update weed/command/volume.go

Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
Co-authored-by: guol-fnst <goul-fnst@fujitsu.com>
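Before the per-file hunks: the feature here is an idle timeout for a volume's leveldb needle index, so the index is loaded on demand and unloaded again when unused. The sketch below illustrates that load/unload pattern in isolation; it is not the SeaweedFS implementation, and the lazyIndex type, its methods, and the short demo timeouts are all invented for illustration.

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    // lazyIndex is a hypothetical stand-in for a volume's leveldb needle index.
    // It records the last access time and lets a janitor unload it when idle.
    type lazyIndex struct {
    	mu         sync.Mutex
    	loaded     bool
    	lastAccess time.Time
    }

    // get loads the index on first use and refreshes the access timestamp.
    func (ix *lazyIndex) get() {
    	ix.mu.Lock()
    	defer ix.mu.Unlock()
    	if !ix.loaded {
    		ix.loaded = true // placeholder for opening the leveldb files
    		fmt.Println("index loaded")
    	}
    	ix.lastAccess = time.Now()
    }

    // janitor unloads the index once it has not been touched for `timeout`.
    // A timeout of zero disables unloading, mirroring the flag's default.
    func (ix *lazyIndex) janitor(timeout time.Duration) {
    	if timeout == 0 {
    		return
    	}
    	for range time.Tick(timeout / 2) {
    		ix.mu.Lock()
    		if ix.loaded && time.Since(ix.lastAccess) > timeout {
    			ix.loaded = false // placeholder for closing the leveldb files
    			fmt.Println("index unloaded after idle timeout")
    		}
    		ix.mu.Unlock()
    	}
    }

    func main() {
    	ix := &lazyIndex{}
    	go ix.janitor(2 * time.Second) // the real flag is expressed in hours
    	ix.get()                       // first access loads the index
    	time.Sleep(5 * time.Second)    // stay idle long enough to be unloaded
    	ix.get()                       // next access transparently reloads it
    }

The actual change plumbs the timeout (in hours) from the command-line flags into storage.NewVolume, as the hunks below show.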
Diffstat (limited to 'weed/command')
-rw-r--r--  weed/command/backup.go   5
-rw-r--r--  weed/command/compact.go  2
-rw-r--r--  weed/command/server.go   1
-rw-r--r--  weed/command/volume.go   3
4 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/weed/command/backup.go b/weed/command/backup.go
index 3b14705d7..58eda638e 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -2,6 +2,7 @@ package command
 
 import (
 	"fmt"
+
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/security"
@@ -113,7 +114,7 @@ func runBackup(cmd *Command, args []string) bool {
 			return true
 		}
 	}
-	v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+	v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
 	if err != nil {
 		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 		return true
@@ -138,7 +139,7 @@ func runBackup(cmd *Command, args []string) bool {
 	// remove the old data
 	v.Destroy()
 	// recreate an empty volume
-	v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+	v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
 	if err != nil {
 		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 		return true
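Both runBackup call sites change the same way: storage.NewVolume gains one trailing argument, and judging from the flags introduced later in this diff it carries the new ldbTimeout value. Passing 0 preserves the old always-loaded behavior for the backup tool; the compact.go hunk below makes the identical adjustment.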
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 6c390ea1f..6d1f5d5f1 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -41,7 +41,7 @@ func runCompact(cmd *Command, args []string) bool {
 	preallocate := *compactVolumePreallocate * (1 << 20)
 	vid := needle.VolumeId(*compactVolumeId)
-	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0)
+	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0, 0)
 	if err != nil {
 		glog.Fatalf("Load Volume [ERROR] %s\n", err)
 	}
diff --git a/weed/command/server.go b/weed/command/server.go
index 526841e28..24c529e62 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -125,6 +125,7 @@ func init() {
 	serverOptions.v.readMode = cmdServer.Flag.String("volume.readMode", "proxy", "[local|proxy|redirect] how to deal with non-local volume: 'not found|read in remote node|redirect volume location'.")
 	serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
 	serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+	serverOptions.v.ldbTimeout = cmdServer.Flag.Int64("volume.index.leveldbTimeout", 0, "alive time for leveldb (default to 0). If leveldb of volume is not accessed in ldbTimeout hours, it will be off loaded to reduce opened files and memory consumption.")
 	serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
 	serverOptions.v.concurrentDownloadLimitMB = cmdServer.Flag.Int("volume.concurrentDownloadLimitMB", 64, "limit total concurrent download size")
 	serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
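Going by the help text, starting a combined server with, say, weed server -volume.index.leveldbTimeout=24 should cause a volume's leveldb index to be unloaded after 24 hours without access and reloaded transparently on the next request; the default of 0 disables unloading entirely.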
diff --git a/weed/command/volume.go b/weed/command/volume.go
index e0bc8b8fe..30337a62a 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -68,6 +68,7 @@ type VolumeServerOptions struct {
 	inflightUploadDataTimeout *time.Duration
 	hasSlowRead               *bool
 	readBufferSizeMB          *int
+	ldbTimeout                *int64
 }
 
 func init() {
@@ -92,6 +93,7 @@ func init() {
 	v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
 	v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
 	v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+	v.ldbTimeout = cmdVolume.Flag.Int64("index.leveldbTimeout", 0, "alive time for leveldb (default to 0). If leveldb of volume is not accessed in ldbTimeout hours, it will be off loaded to reduce opened files and memory consumption.")
 	v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 256, "limit total concurrent upload size")
 	v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size")
 	v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
@@ -249,6 +251,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		*v.inflightUploadDataTimeout,
 		*v.hasSlowRead,
 		*v.readBufferSizeMB,
+		*v.ldbTimeout,
 	)
 
 	// starting grpc server
 	grpcS := v.startGrpcService(volumeServer)
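The standalone volume server exposes the same knob without the volume. prefix (weed volume -index.leveldbTimeout=24). The dereferenced *v.ldbTimeout is handed positionally to the volume-server constructor (the call is only partially visible in this hunk), and the same value ultimately reaches storage.NewVolume, which is why every call site earlier in the diff gained a trailing parameter.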