-rw-r--r--  weed/command/command.go                 |  4
-rw-r--r--  weed/command/filer_copy.go              | 31
-rw-r--r--  weed/command/scaffold.go                |  3
-rw-r--r--  weed/command/scaffold/master.toml       |  2
-rw-r--r--  weed/operation/assign_file_id.go        |  4
-rw-r--r--  weed/sequence/snowflake_sequencer.go    |  5
-rw-r--r--  weed/server/master_server.go            |  4
-rw-r--r--  weed/shell/command_s3_bucket_delete.go  | 13
8 files changed, 47 insertions(+), 19 deletions(-)
diff --git a/weed/command/command.go b/weed/command/command.go
index 18e53ad8c..0bac56442 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -8,15 +8,15 @@ import (
)
var Commands = []*Command{
- cmdBenchmark,
cmdBackup,
+ cmdBenchmark,
cmdCompact,
- cmdCopy,
cmdDownload,
cmdExport,
cmdFiler,
cmdFilerBackup,
cmdFilerCat,
+ cmdFilerCopy,
cmdFilerMetaBackup,
cmdFilerMetaTail,
cmdFilerReplicate,
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 9d21c40ef..722f64679 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -52,21 +52,21 @@ type CopyOptions struct {
}
func init() {
- cmdCopy.Run = runCopy // break init cycle
- cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
- copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
- copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
- copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
- copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
- copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
- copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
- copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
- copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
- copy.checkSize = cmdCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
- copy.verbose = cmdCopy.Flag.Bool("verbose", false, "print out details during copying")
+ cmdFilerCopy.Run = runCopy // break init cycle
+ cmdFilerCopy.IsDebug = cmdFilerCopy.Flag.Bool("debug", false, "verbose debug information")
+ copy.include = cmdFilerCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
+ copy.replication = cmdFilerCopy.Flag.String("replication", "", "replication type")
+ copy.collection = cmdFilerCopy.Flag.String("collection", "", "optional collection name")
+ copy.ttl = cmdFilerCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
+ copy.diskType = cmdFilerCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+ copy.maxMB = cmdFilerCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
+ copy.concurrenctFiles = cmdFilerCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
+ copy.concurrenctChunks = cmdFilerCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
+ copy.checkSize = cmdFilerCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
+ copy.verbose = cmdFilerCopy.Flag.Bool("verbose", false, "print out details during copying")
}
-var cmdCopy = &Command{
+var cmdFilerCopy = &Command{
UsageLine: "filer.copy file_or_dir1 [file_or_dir2 file_or_dir3] http://localhost:8888/path/to/a/folder/",
Short: "copy one or a list of files to a filer folder",
Long: `copy one or a list of files, or batch copy one whole folder recursively, to a filer folder
@@ -154,7 +154,7 @@ func runCopy(cmd *Command, args []string) bool {
}
copy.ttlSec = int32(ttl.Minutes()) * 60
- if *cmdCopy.IsDebug {
+ if *cmdFilerCopy.IsDebug {
grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
}
@@ -381,6 +381,9 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
if assignResult.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
}
+ if assignResult.Url == "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult)
+ }
return nil
})
})
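The added guard in uploadFileAsOne treats an empty assignResult.Url as a failure even when assignResult.Error is blank, so the upload never targets a URL like "http:///fid". A minimal, self-contained sketch of the same check (the AssignResult struct and validateAssign helper below are illustrative, not the seaweedfs API):

package main

import "fmt"

// AssignResult mirrors only the fields the guard inspects.
type AssignResult struct {
    Fid   string
    Url   string
    Error string
}

// validateAssign rejects a result that carries an explicit error or no upload URL,
// so the caller never builds an unusable upload target.
func validateAssign(r *AssignResult) error {
    if r.Error != "" {
        return fmt.Errorf("assign volume failure: %v", r.Error)
    }
    if r.Url == "" {
        return fmt.Errorf("assign volume failure: empty url in %+v", r)
    }
    return nil
}

func main() {
    bad := &AssignResult{Fid: "3,01637037d6"} // no Url returned
    fmt.Println(validateAssign(bad))          // prints the empty-url error
}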
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index b48cf0959..886c0ac5e 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -1,6 +1,7 @@
package command
import (
+ "fmt"
"github.com/chrislusf/seaweedfs/weed/command/scaffold"
"io/ioutil"
"path/filepath"
@@ -56,7 +57,7 @@ func runScaffold(cmd *Command, args []string) bool {
if *outputPath != "" {
ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
} else {
- println(content)
+ fmt.Println(content)
}
return true
}
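The builtin println writes to standard error (with the gc toolchain), while fmt.Println writes to standard output, so after this change the scaffolded config can be redirected or piped, e.g. weed scaffold -config=master > master.toml. A small sketch of the difference, outside the commit:

package main

import "fmt"

func main() {
    // The predeclared println is intended for bootstrapping/debugging and,
    // with the standard toolchain, goes to stderr; a redirect would miss it.
    println("written to stderr")

    // fmt.Println writes to stdout, so `> master.toml` captures it as expected.
    fmt.Println("written to stdout")
}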
diff --git a/weed/command/scaffold/master.toml b/weed/command/scaffold/master.toml
index f550f0ad6..60a7dbcfc 100644
--- a/weed/command/scaffold/master.toml
+++ b/weed/command/scaffold/master.toml
@@ -26,6 +26,8 @@ type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id se
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
+# when sequencer.type = snowflake, the snowflake id must be different from other masters
+sequencer_snowflake_id = 0 # any number between 1~1023
# configurations for tiered cloud storage
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index ffd3e4938..fabc820ff 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -73,6 +73,10 @@ func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest
ret.Error = resp.Error
ret.Auth = security.EncodedJwt(resp.Auth)
+ if resp.Error != "" {
+ return fmt.Errorf("assignRequest: %v", resp.Error)
+ }
+
return nil
})
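Before this hunk a master-side failure only populated ret.Error while the callback still returned nil, so Assign looked successful to callers that checked only the returned error. A hedged before/after sketch (assignResponse is a simplified stand-in for the real master_pb response):

package main

import "fmt"

type assignResponse struct {
    Fid   string
    Error string
}

// assignOld copies nothing back as an error: the caller had to remember
// to inspect resp.Error separately.
func assignOld(resp *assignResponse) (string, error) {
    return resp.Fid, nil
}

// assignNew fails fast, matching the added check on resp.Error.
func assignNew(resp *assignResponse) (string, error) {
    if resp.Error != "" {
        return "", fmt.Errorf("assignRequest: %v", resp.Error)
    }
    return resp.Fid, nil
}

func main() {
    resp := &assignResponse{Error: "no writable volumes"}
    fid, err := assignOld(resp)
    fmt.Printf("old: fid=%q err=%v\n", fid, err) // old: fid="" err=<nil>
    _, err = assignNew(resp)
    fmt.Printf("new: err=%v\n", err) // new: err=assignRequest: no writable volumes
}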
diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go
index 300449fa0..381933b3a 100644
--- a/weed/sequence/snowflake_sequencer.go
+++ b/weed/sequence/snowflake_sequencer.go
@@ -13,8 +13,11 @@ type SnowflakeSequencer struct {
node *snowflake.Node
}
-func NewSnowflakeSequencer(nodeid string) (*SnowflakeSequencer, error) {
+func NewSnowflakeSequencer(nodeid string, snowflakeId int) (*SnowflakeSequencer, error) {
nodeid_hash := hash(nodeid) & 0x3ff
+ if snowflakeId != 0 {
+ nodeid_hash = uint32(snowflakeId)
+ }
glog.V(0).Infof("use snowflake seq id generator, nodeid:%s hex_of_nodeid: %x", nodeid, nodeid_hash)
node, err := snowflake.NewNode(int64(nodeid_hash))
if err != nil {
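With sequencer_snowflake_id left at 0, the node id still falls back to a hash of the master's host:port masked into the 10-bit range a snowflake node id allows (0-1023); any non-zero value overrides it. A sketch of that selection, assuming an FNV-style hash in place of the unexported hash helper:

package main

import (
    "fmt"
    "hash/fnv"
)

// pickNodeID mirrors the selection in NewSnowflakeSequencer: an explicitly
// configured id wins; otherwise host:port is hashed into the 10-bit range.
func pickNodeID(hostPort string, configuredID int) uint32 {
    h := fnv.New32a() // assumption: the real hash helper may differ
    h.Write([]byte(hostPort))
    nodeID := h.Sum32() & 0x3ff
    if configuredID != 0 {
        nodeID = uint32(configuredID)
    }
    return nodeID
}

func main() {
    fmt.Println(pickNodeID("master1:9333", 0))   // derived from the hash
    fmt.Println(pickNodeID("master2:9333", 512)) // explicitly configured
}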
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index 11dc95ded..9d222a342 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -28,6 +28,7 @@ import (
const (
SequencerType = "master.sequencer.type"
SequencerEtcdUrls = "master.sequencer.sequencer_etcd_urls"
+ SequencerSnowflakeId = "master.sequencer.sequencer_snowflake_id"
)
type MasterOption struct {
@@ -293,7 +294,8 @@ func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer
}
case "snowflake":
var err error
- seq, err = sequence.NewSnowflakeSequencer(fmt.Sprintf("%s:%d", option.Host, option.Port))
+ snowflakeId := v.GetInt(SequencerSnowflakeId)
+ seq, err = sequence.NewSnowflakeSequencer(fmt.Sprintf("%s:%d", option.Host, option.Port), snowflakeId)
if err != nil {
glog.Error(err)
seq = nil
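The new SequencerSnowflakeId key resolves against the master.toml section shown above. A hedged sketch of the lookup with a viper-style reader (seaweedfs loads its configuration through its own util wrapper, so the loading code here is only illustrative):

package main

import (
    "bytes"
    "fmt"

    "github.com/spf13/viper"
)

const masterToml = `
[master.sequencer]
type = "snowflake"
sequencer_snowflake_id = 7
`

func main() {
    v := viper.New()
    v.SetConfigType("toml")
    if err := v.ReadConfig(bytes.NewBufferString(masterToml)); err != nil {
        panic(err)
    }
    // Same key the master server reads via SequencerSnowflakeId.
    fmt.Println(v.GetInt("master.sequencer.sequencer_snowflake_id")) // 7
}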
diff --git a/weed/shell/command_s3_bucket_delete.go b/weed/shell/command_s3_bucket_delete.go
index a8d8c5c29..26953c249 100644
--- a/weed/shell/command_s3_bucket_delete.go
+++ b/weed/shell/command_s3_bucket_delete.go
@@ -1,8 +1,10 @@
package shell
import (
+ "context"
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"io"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -49,6 +51,17 @@ func (c *commandS3BucketDelete) Do(args []string, commandEnv *CommandEnv, writer
return fmt.Errorf("read buckets: %v", err)
}
+ // delete the collection directly first
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: *bucketName,
+ })
+ return err
+ })
+ if err != nil {
+ return
+ }
+
return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true, false, nil)
}
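The added block deletes the bucket's collection on the master (freeing the volume data) before the existing filer_pb.Remove call drops the bucket directory metadata. A short sketch of that two-step order, with hypothetical stubs standing in for the real master and filer clients:

package main

import (
    "context"
    "fmt"
)

// deleteCollection and removeFilerDir are hypothetical stubs, not the seaweedfs client API.
func deleteCollection(ctx context.Context, name string) error {
    fmt.Println("master: delete collection", name) // drops the bucket's volumes and data
    return nil
}

func removeFilerDir(ctx context.Context, path string) error {
    fmt.Println("filer: remove directory", path) // drops the bucket's metadata entry
    return nil
}

// deleteBucket mirrors the order used by s3.bucket.delete after this change:
// collection first, then the filer directory.
func deleteBucket(ctx context.Context, bucketsPath, bucket string) error {
    if err := deleteCollection(ctx, bucket); err != nil {
        return err
    }
    return removeFilerDir(ctx, bucketsPath+"/"+bucket)
}

func main() {
    _ = deleteBucket(context.Background(), "/buckets", "mybucket")
}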