aboutsummaryrefslogtreecommitdiff
path: root/weed/command/backup.go
diff options
context:
space:
mode:
authorChris Lu <chris.lu@gmail.com>2016-06-02 18:09:14 -0700
committerChris Lu <chris.lu@gmail.com>2016-06-02 18:09:14 -0700
commit5ce6bbf07672bf3f3c8d26cd2ce0e3e853a47c44 (patch)
tree2e4dd2ad0a618ab2b7cdebcdb9c503526c31e2e8 /weed/command/backup.go
parentcaeffa3998adc060fa66c4cd77af971ff2d26c57 (diff)
downloadseaweedfs-5ce6bbf07672bf3f3c8d26cd2ce0e3e853a47c44.tar.xz
seaweedfs-5ce6bbf07672bf3f3c8d26cd2ce0e3e853a47c44.zip
directory structure change to work with glide
Glide has its own requirements. My previous workaround caused some code check-in errors; this change fixes that.
Diffstat (limited to 'weed/command/backup.go')
-rw-r--r--weed/command/backup.go90
1 files changed, 90 insertions, 0 deletions
diff --git a/weed/command/backup.go b/weed/command/backup.go
new file mode 100644
index 000000000..0b3994027
--- /dev/null
+++ b/weed/command/backup.go
@@ -0,0 +1,90 @@
+package command
+
+import (
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+)
+
// s holds the flag-backed settings for the backup command.
// It is populated by init() below and read by runBackup.
var (
	s BackupOptions
)

// BackupOptions groups the command-line options for `weed backup`.
// All fields are pointers returned by the flag package.
type BackupOptions struct {
	master     *string // SeaweedFS master address, e.g. "localhost:9333"
	collection *string // collection name; "" selects the default collection
	dir        *string // local directory where volume .dat/.idx files live
	volumeId   *int    // volume id to back up; -1 means "not specified"
}
+
// init registers the backup command's flags and wires up its Run
// function. Run is assigned here rather than in the cmdBackup literal
// because runBackup references cmdBackup's flags via s, which would
// otherwise create an initialization cycle.
func init() {
	cmdBackup.Run = runBackup // break init cycle
	s.master = cmdBackup.Flag.String("server", "localhost:9333", "SeaweedFS master location")
	s.collection = cmdBackup.Flag.String("collection", "", "collection name")
	s.dir = cmdBackup.Flag.String("dir", ".", "directory to store volume data files")
	s.volumeId = cmdBackup.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.")
}
+
+var cmdBackup = &Command{
+ UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333",
+ Short: "incrementally backup a volume to local folder",
+ Long: `Incrementally backup volume data.
+
+ It is expected that you use this inside a script, to loop through
+ all possible volume ids that needs to be backup to local folder.
+
+ The volume id does not need to exist locally or even remotely.
+ This will help to backup future new volumes.
+
+ Usually backing up is just copying the .dat (and .idx) files.
+ But it's tricky to incremententally copy the differences.
+
+ The complexity comes when there are multiple addition, deletion and compaction.
+ This tool will handle them correctly and efficiently, avoiding unnecessary data transporation.
+ `,
+}
+
+func runBackup(cmd *Command, args []string) bool {
+ if *s.volumeId == -1 {
+ return false
+ }
+ vid := storage.VolumeId(*s.volumeId)
+
+ // find volume location, replication, ttl info
+ lookup, err := operation.Lookup(*s.master, vid.String())
+ if err != nil {
+ fmt.Printf("Error looking up volume %d: %v\n", vid, err)
+ return true
+ }
+ volumeServer := lookup.Locations[0].Url
+
+ stats, err := operation.GetVolumeSyncStatus(volumeServer, vid.String())
+ if err != nil {
+ fmt.Printf("Error get volume %d status: %v\n", vid, err)
+ return true
+ }
+ ttl, err := storage.ReadTTL(stats.Ttl)
+ if err != nil {
+ fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
+ return true
+ }
+ replication, err := storage.NewReplicaPlacementFromString(stats.Replication)
+ if err != nil {
+ fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
+ return true
+ }
+
+ v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl)
+ if err != nil {
+ fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
+ return true
+ }
+
+ if err := v.Synchronize(volumeServer); err != nil {
+ fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
+ return true
+ }
+
+ return true
+}