path: root/weed/command
author	Chris Lu <chris.lu@gmail.com>	2021-09-06 15:10:55 -0700
committer	Chris Lu <chris.lu@gmail.com>	2021-09-06 15:10:55 -0700
commit	c218ef20c760fafde03430cac2066586cd2cf4d0 (patch)
tree	e47cd01ddfb8d9d2f3958439011d9a5a1c010f15 /weed/command
parent	1702ce539563cfd297175aa88891db3c48f19acf (diff)
download	seaweedfs-c218ef20c760fafde03430cac2066586cd2cf4d0.tar.xz
	seaweedfs-c218ef20c760fafde03430cac2066586cd2cf4d0.zip
filer.remote.sync: automatically detect the primary remote storage
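With this change, filer.remote.sync no longer requires -createBucketAt when the mount mappings already record a primary remote storage, or when only one remote storage is configured. A minimal invocation sketch, assuming a remote storage named cloud1 has already been configured and mounted (cloud1 is the example name used in the command help below):

    # only one remote storage configured: the primary is detected automatically
    weed filer.remote.sync

    # multiple remote storages configured: name the one to create new buckets in
    weed filer.remote.sync -createBucketAt=cloud1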
Diffstat (limited to 'weed/command')
-rw-r--r--  weed/command/filer_remote_sync.go          | 63
-rw-r--r--  weed/command/filer_remote_sync_buckets.go  | 26
2 files changed, 60 insertions, 29 deletions
diff --git a/weed/command/filer_remote_sync.go b/weed/command/filer_remote_sync.go
index 18e8fcc3c..0324a8103 100644
--- a/weed/command/filer_remote_sync.go
+++ b/weed/command/filer_remote_sync.go
@@ -11,6 +11,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
+ "os"
"time"
)
@@ -47,7 +48,7 @@ var (
func init() {
cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
- remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "/", "a mounted directory on filer")
+ remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "", "a mounted directory on filer")
remoteSyncOptions.createBucketAt = cmdFilerRemoteSynchronize.Flag.String("createBucketAt", "", "one remote storage name to create new buckets in")
remoteSyncOptions.createBucketRandomSuffix = cmdFilerRemoteSynchronize.Flag.Bool("createBucketWithRandomSuffix", false, "add randomized suffix to bucket name to avoid conflicts")
remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
@@ -66,9 +67,15 @@ var cmdFilerRemoteSynchronize = &Command{
There are two modes:
1)Write back one mounted folder to remote storage
+
weed filer.remote.sync -dir=/mount/s3_on_cloud
+
2)Watch /buckets folder and write back all changes.
Any new buckets will be created in this remote storage.
+
+ # if there is only one remote storage configured
+ weed filer.remote.sync
+ # if there are multiple remote storages configured
weed filer.remote.sync -createBucketAt=cloud1
`,
@@ -91,32 +98,18 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
*remoteSyncOptions.readChunkFromFiler,
)
- storageName := *remoteSyncOptions.createBucketAt
- if storageName != "" {
-
- remoteSyncOptions.bucketsDir = "/buckets"
- // check buckets again
- remoteSyncOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
- resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
- if err != nil {
- return err
- }
- remoteSyncOptions.bucketsDir = resp.DirBuckets
- return nil
- })
-
- fmt.Printf("synchronize %s, default new bucket creation in %s ...\n", remoteSyncOptions.bucketsDir, storageName)
- util.RetryForever("filer.remote.sync buckets "+storageName, func() error {
- return remoteSyncOptions.followBucketUpdatesAndUploadToRemote(filerSource)
- }, func(err error) bool {
- if err != nil {
- glog.Errorf("synchronize %s to %s: %v", remoteSyncOptions.bucketsDir, storageName, err)
- }
- return true
- })
- }
+ remoteSyncOptions.bucketsDir = "/buckets"
+ // check buckets again
+ remoteSyncOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
+ resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return err
+ }
+ remoteSyncOptions.bucketsDir = resp.DirBuckets
+ return nil
+ })
- if dir != "" {
+ if dir != "" && dir != remoteSyncOptions.bucketsDir {
fmt.Printf("synchronize %s to remote storage...\n", dir)
util.RetryForever("filer.remote.sync "+dir, func() error {
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
@@ -126,7 +119,25 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
}
return true
})
+ return true
}
+ // read filer remote storage mount mappings
+ if detectErr := remoteSyncOptions.collectRemoteStorageConf(); detectErr != nil {
+ fmt.Fprintf(os.Stderr, "read mount info: %v\n", detectErr)
+ return true
+ }
+
+ // synchronize /buckets folder
+ fmt.Printf("synchronize buckets in %s ...\n", remoteSyncOptions.bucketsDir)
+ util.RetryForever("filer.remote.sync buckets", func() error {
+ return remoteSyncOptions.followBucketUpdatesAndUploadToRemote(filerSource)
+ }, func(err error) bool {
+ if err != nil {
+ glog.Errorf("synchronize %s: %v", remoteSyncOptions.bucketsDir, err)
+ }
+ return true
+ })
return true
+
}
diff --git a/weed/command/filer_remote_sync_buckets.go b/weed/command/filer_remote_sync_buckets.go
index 92383614b..70f9f49c1 100644
--- a/weed/command/filer_remote_sync_buckets.go
+++ b/weed/command/filer_remote_sync_buckets.go
@@ -51,6 +51,19 @@ func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.
// this directory is imported from "remote.mount.buckets" or "remote.mount"
return nil
}
+ if option.mappings.PrimaryBucketStorageName != "" && *option.createBucketAt == "" {
+ *option.createBucketAt = option.mappings.PrimaryBucketStorageName
+ glog.V(0).Infof("%s is set as the primary remote storage", *option.createBucketAt)
+ }
+ if len(option.mappings.Mappings) == 1 && *option.createBucketAt == "" {
+ for k := range option.mappings.Mappings {
+ *option.createBucketAt = k
+ glog.V(0).Infof("%s is set as the only remote storage", *option.createBucketAt)
+ }
+ }
+ if *option.createBucketAt == "" {
+ return nil
+ }
remoteConf, found := option.remoteConfs[*option.createBucketAt]
if !found {
return fmt.Errorf("un-configured remote storage %s", *option.createBucketAt)
@@ -72,7 +85,7 @@ func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.
glog.V(0).Infof("create bucket %s", bucketName)
if err := client.CreateBucket(bucketName); err != nil {
- return err
+ return fmt.Errorf("create bucket %s in %s: %v", bucketName, remoteConf.Name, err)
}
bucketPath := util.FullPath(option.bucketsDir).Child(entry.Name)
@@ -95,12 +108,12 @@ func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.
client, remoteStorageMountLocation, err := option.findRemoteStorageClient(entry.Name)
if err != nil {
- return err
+ return fmt.Errorf("findRemoteStorageClient %s: %v", entry.Name, err)
}
glog.V(0).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
if err := client.DeleteBucket(remoteStorageMountLocation.Bucket); err != nil {
- return err
+ return fmt.Errorf("delete remote bucket %s: %v", remoteStorageMountLocation.Bucket, err)
}
bucketPath := util.FullPath(option.bucketsDir).Child(entry.Name)
@@ -351,6 +364,7 @@ func (option *RemoteSyncOptions) collectRemoteStorageConf() (err error) {
}
option.remoteConfs = make(map[string]*remote_pb.RemoteConf)
+ var lastConfName string
err = filer_pb.List(option, filer.DirectoryEtcRemote, "", func(entry *filer_pb.Entry, isLast bool) error {
if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
return nil
@@ -360,8 +374,14 @@ func (option *RemoteSyncOptions) collectRemoteStorageConf() (err error) {
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, entry.Name, err)
}
option.remoteConfs[conf.Name] = conf
+ lastConfName = conf.Name
return nil
}, "", false, math.MaxUint32)
+ if option.mappings.PrimaryBucketStorageName == "" && len(option.remoteConfs) == 1 {
+ glog.V(0).Infof("%s is set to the default remote storage", lastConfName)
+ option.mappings.PrimaryBucketStorageName = lastConfName
+ }
+
return
}