| field | value | date |
|---|---|---|
| author | bingoohuang <bingoo.huang@gmail.com> | 2021-04-26 17:19:35 +0800 |
| committer | bingoohuang <bingoo.huang@gmail.com> | 2021-04-26 17:19:35 +0800 |
| commit | d861cbd81b75b6684c971ac00e33685e6575b833 (patch) | |
| tree | 301805fef4aa5d0096bfb1510536f7a009b661e7 /weed | |
| parent | 70da715d8d917527291b35fb069fac077d17b868 (diff) | |
| parent | 4ee58922eff61a5a4ca29c0b4829b097a498549e (diff) | |
| download | seaweedfs-d861cbd81b75b6684c971ac00e33685e6575b833.tar.xz, seaweedfs-d861cbd81b75b6684c971ac00e33685e6575b833.zip | |
Merge branch 'master' of https://github.com/bingoohuang/seaweedfs
Diffstat (limited to 'weed')
511 files changed, 58474 insertions, 11640 deletions
```diff
diff --git a/weed/Makefile b/weed/Makefile
new file mode 100644
index 000000000..8f1257d09
--- /dev/null
+++ b/weed/Makefile
@@ -0,0 +1,39 @@
+BINARY = weed
+
+SOURCE_DIR = .
+
+all: debug_mount
+
+.PHONY : clean debug_mount
+
+clean:
+	go clean $(SOURCE_DIR)
+	rm -f $(BINARY)
+
+debug_shell:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- shell
+
+debug_mount:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
+
+debug_server:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- server -dir=/Volumes/mobile_disk/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1
+
+debug_volume:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- volume -dir=/Volumes/mobile_disk/100 -port 8564 -max=30 -preStopSeconds=2
+
+debug_webdav:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 webdav
+
+debug_s3:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 s3
+
+debug_filer_copy:
+	go build -gcflags="all=-N -l"
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h
diff --git a/weed/command/backup.go b/weed/command/backup.go
index 0f6bed225..207df770b 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -3,8 +3,6 @@ package command
 import (
 	"fmt"
 
-	"github.com/spf13/viper"
-
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -66,7 +64,7 @@ var cmdBackup = &Command{
 func runBackup(cmd *Command, args []string) bool {
 
 	util.LoadConfiguration("security", false)
-	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
 
 	if *s.volumeId == -1 {
 		return false
@@ -74,7 +72,7 @@ func runBackup(cmd *Command, args []string) bool {
 	vid := needle.VolumeId(*s.volumeId)
 
 	// find volume location, replication, ttl info
-	lookup, err := operation.Lookup(*s.master, vid.String())
+	lookup, err := operation.Lookup(func() string { return *s.master }, vid.String())
 	if err != nil {
 		fmt.Printf("Error looking up volume %d: %v\n", vid, err)
 		return true
@@ -114,14 +112,14 @@ func runBackup(cmd *Command, args []string) bool {
 			return true
 		}
 	}
-	v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+	v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
 	if err != nil {
 		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 		return true
 	}
 
 	if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
-		if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil {
+		if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
 			fmt.Printf("Compact Volume before synchronizing %v\n", err)
 			return true
 		}
@@ -139,7 +137,7 @@ func runBackup(cmd *Command, args []string) bool {
 		// remove the old data
 		v.Destroy()
 		// recreate an empty volume
-		v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+		v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
 		if err != nil {
 			fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 			return true
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 26be1fe3a..4fedb55f1 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -2,7 +2,6 @@ package command
 import (
 	"bufio"
-	"context"
 	"fmt"
 	"io"
 	"math"
@@ -15,7 +14,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/spf13/viper"
 	"google.golang.org/grpc"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -37,10 +35,13 @@ type BenchmarkOptions struct {
 	sequentialRead *bool
 	collection     *string
 	replication    *string
+	diskType       *string
 	cpuprofile     *string
 	maxCpu         *int
 	grpcDialOption grpc.DialOption
 	masterClient   *wdclient.MasterClient
+	fsync          *bool
+	useTcp         *bool
 }
@@ -63,8 +64,11 @@ func init() {
 	b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
 	b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
 	b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
+	b.diskType = cmdBenchmark.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
 	b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
 	b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
+	b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
+	b.useTcp = cmdBenchmark.Flag.Bool("useTcp", false, "send data via tcp")
 	sharedBytes = make([]byte, 1024)
 }
@@ -109,9 +113,9 @@ var (
 func runBenchmark(cmd *Command, args []string) bool {
 
 	util.LoadConfiguration("security", false)
-	b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
+	b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
 
-	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
 	if *b.maxCpu < 1 {
 		*b.maxCpu = runtime.NumCPU()
 	}
@@ -125,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}
 
-	b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
+	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, "", strings.Split(*b.masters, ","))
 	go b.masterClient.KeepConnectedToMaster()
 	b.masterClient.WaitUntilConnected()
@@ -221,25 +225,37 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 	random := rand.New(rand.NewSource(time.Now().UnixNano()))
 
+	volumeTcpClient := wdclient.NewVolumeTcpClient()
+
 	for id := range idChan {
 		start := time.Now()
 		fileSize := int64(*b.fileSize + random.Intn(64))
 		fp := &operation.FilePart{
-			Reader:   &FakeReader{id: uint64(id), size: fileSize},
+			Reader:   &FakeReader{id: uint64(id), size: fileSize, random: random},
 			FileSize: fileSize,
 			MimeType: "image/bench", // prevent gzip benchmark content
+			Fsync:    *b.fsync,
 		}
 		ar := &operation.VolumeAssignRequest{
 			Count:       1,
 			Collection:  *b.collection,
 			Replication: *b.replication,
+			DiskType:    *b.diskType,
 		}
-		if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
+		if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
 			fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
 			if !isSecure && assignResult.Auth != "" {
 				isSecure = true
 			}
-			if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil {
+			if *b.useTcp {
+				if uploadByTcp(volumeTcpClient, fp) {
+					fileIdLineChan <- fp.Fid
+					s.completed++
+					s.transferred += fileSize
+				} else {
+					s.failed++
+				}
+			} else if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil {
 				if random.Intn(100) < *b.deletePercentage {
 					s.total++
 					delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -279,19 +295,29 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 			fmt.Printf("reading file %s\n", fid)
 		}
 		start := time.Now()
-		url, err := b.masterClient.LookupFileId(fid)
+		var bytesRead int
+		var err error
+		urls, err := b.masterClient.LookupFileId(fid)
 		if err != nil {
 			s.failed++
 			println("!!!! ", fid, " location not found!!!!!")
 			continue
 		}
-		if bytesRead, err := util.Get(url); err == nil {
+		var bytes []byte
+		for _, url := range urls {
+			bytes, _, err = util.Get(url)
+			if err == nil {
+				break
+			}
+		}
+		bytesRead = len(bytes)
+		if err == nil {
 			s.completed++
-			s.transferred += int64(len(bytesRead))
+			s.transferred += int64(bytesRead)
 			readStats.addSample(time.Now().Sub(start))
 		} else {
 			s.failed++
-			fmt.Printf("Failed to read %s error:%v\n", url, err)
+			fmt.Printf("Failed to read %s error:%v\n", fid, err)
 		}
 	}
 }
@@ -315,6 +341,17 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b
 	}
 }
 
+func uploadByTcp(volumeTcpClient *wdclient.VolumeTcpClient, fp *operation.FilePart) bool {
+
+	err := volumeTcpClient.PutFileChunk(fp.Server, fp.Fid, uint32(fp.FileSize), fp.Reader)
+	if err != nil {
+		glog.Errorf("upload chunk err: %v", err)
+		return false
+	}
+
+	return true
+}
+
 func readFileIds(fileName string, fileIdLineChan chan string) {
 	file, err := os.Open(fileName) // For read access.
```
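The benchmark changes above follow a pattern applied throughout this commit: call sites that used to pass a resolved master address (`b.masterClient.GetMaster()`) now pass the getter itself (`b.masterClient.GetMaster`), and helpers such as `operation.Lookup` accept a `func() string`. A minimal standalone sketch of why that matters for retry loops; the names here are illustrative, not SeaweedFS APIs:

```go
package main

import "fmt"

// GetMasterFn mirrors the function-typed parameter introduced by this change.
type GetMasterFn func() string

func assignWithRetry(masterFn GetMasterFn, attempts int) (string, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		// re-evaluated on every attempt, so a leader change is picked up mid-loop
		master := masterFn()
		url := fmt.Sprintf("http://%s/dir/assign", master)
		// ... issue the request; on success return the assigned location
		return url, nil
	}
	return "", lastErr
}

func main() {
	currentMaster := "localhost:9333"
	url, _ := assignWithRetry(func() string { return currentMaster }, 3)
	fmt.Println(url)
}
```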
```diff
 	if err != nil {
@@ -353,7 +390,7 @@ func readFileIds(fileName string, fileIdLineChan chan string) {
 }
 
 const (
-	benchResolution = 10000 //0.1 microsecond
+	benchResolution = 10000 // 0.1 microsecond
 	benchBucket     = 1000000000 / benchResolution
 )
@@ -476,7 +513,7 @@ func (s *stats) printStats() {
 	fmt.Printf("\nConnection Times (ms)\n")
 	fmt.Printf("              min      avg      max      std\n")
 	fmt.Printf("Total:        %2.1f     %3.1f     %3.1f     %3.1f\n", float32(min)/10, float32(avg)/10, float32(max)/10, std/10)
-	//printing percentiles
+	// printing percentiles
 	fmt.Printf("\nPercentage of the requests served within a certain time (ms)\n")
 	percentiles := make([]int, len(percentages))
 	for i := 0; i < len(percentages); i++ {
@@ -510,8 +547,9 @@ func (s *stats) printStats() {
 
 // a fake reader to generate content to upload
 type FakeReader struct {
-	id   uint64 // an id number
-	size int64  // max bytes
+	id     uint64 // an id number
+	size   int64  // max bytes
+	random *rand.Rand
 }
 
 func (l *FakeReader) Read(p []byte) (n int, err error) {
@@ -527,6 +565,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
 		for i := 0; i < 8; i++ {
 			p[i] = byte(l.id >> uint(i*8))
 		}
+		l.random.Read(p[8:])
 	}
 	l.size -= int64(n)
 	return
diff --git a/weed/command/command.go b/weed/command/command.go
index 79c00d4cd..b6efcead2 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -1,8 +1,8 @@
 package command
 
 import (
-	"flag"
 	"fmt"
+	flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
 	"os"
 	"strings"
 )
@@ -12,20 +12,28 @@ var Commands = []*Command{
 	cmdBackup,
 	cmdCompact,
 	cmdCopy,
-	cmdFix,
+	cmdDownload,
+	cmdExport,
+	cmdFiler,
+	cmdFilerBackup,
+	cmdFilerCat,
+	cmdFilerMetaBackup,
+	cmdFilerMetaTail,
 	cmdFilerReplicate,
-	cmdServer,
+	cmdFilerSynchronize,
+	cmdFix,
+	cmdGateway,
 	cmdMaster,
-	cmdFiler,
+	cmdMount,
 	cmdS3,
-	cmdUpload,
-	cmdDownload,
+	cmdIam,
+	cmdMsgBroker,
 	cmdScaffold,
+	cmdServer,
 	cmdShell,
+	cmdUpload,
 	cmdVersion,
 	cmdVolume,
-	cmdExport,
-	cmdMount,
 	cmdWebDav,
 }
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 85313b749..92e25f474 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -4,6 +4,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func init() {
@@ -40,8 +41,7 @@ func runCompact(cmd *Command, args []string) bool {
 	preallocate := *compactVolumePreallocate * (1 << 20)
 
 	vid := needle.VolumeId(*compactVolumeId)
-	v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid,
-		storage.NeedleMapInMemory, nil, nil, preallocate, 0)
+	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0)
 	if err != nil {
 		glog.Fatalf("Load Volume [ERROR] %s\n", err)
 	}
@@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool {
 			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	} else {
-		if err = v.Compact2(preallocate); err != nil {
+		if err = v.Compact2(preallocate, 0); err != nil {
 			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	}
diff --git a/weed/command/download.go b/weed/command/download.go
index b3e33defd..7bbff9448 100644
--- a/weed/command/download.go
+++ b/weed/command/download.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"os"
 	"path"
 	"strings"
@@ -43,15 +44,15 @@ var cmdDownload = &Command{
 func runDownload(cmd *Command, args []string) bool {
 	for _, fid := range args {
-		if e := downloadToFile(*d.server, fid, *d.dir); e != nil {
+		if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil {
 			fmt.Println("Download Error: ", fid, e)
 		}
 	}
 	return true
 }
 
-func downloadToFile(server, fileId, saveDir string) error {
-	fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
+	fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
 	if lookupError != nil {
 		return lookupError
 	}
@@ -59,7 +60,7 @@ func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
 	if err != nil {
 		return err
 	}
-	defer rc.Close()
+	defer util.CloseResponse(rc)
 	if filename == "" {
 		filename = fileId
 	}
@@ -75,14 +76,14 @@ func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
 	}
 	defer f.Close()
 	if isFileList {
-		content, err := ioutil.ReadAll(rc)
+		content, err := ioutil.ReadAll(rc.Body)
 		if err != nil {
 			return err
 		}
 		fids := strings.Split(string(content), "\n")
 		for _, partId := range fids {
 			var n int
-			_, part, err := fetchContent(*d.server, partId)
+			_, part, err := fetchContent(masterFn, partId)
 			if err == nil {
 				n, err = f.Write(part)
 			}
@@ -94,7 +95,7 @@ func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
 			}
 		}
 	} else {
-		if _, err = io.Copy(f, rc); err != nil {
+		if _, err = io.Copy(f, rc.Body); err != nil {
 			return err
 		}
@@ -102,17 +103,17 @@ func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
 	return nil
 }
 
-func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
-	fileUrl, lookupError := operation.LookupFileId(server, fileId)
+func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) {
+	fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
 	if lookupError != nil {
 		return "", nil, lookupError
 	}
-	var rc io.ReadCloser
+	var rc *http.Response
 	if filename, _, rc, e = util.DownloadFile(fileUrl); e != nil {
 		return "", nil, e
 	}
-	content, e = ioutil.ReadAll(rc)
-	rc.Close()
+	defer util.CloseResponse(rc)
+	content, e = ioutil.ReadAll(rc.Body)
 	return
 }
diff --git a/weed/command/export.go b/weed/command/export.go
index 8d664ad3b..1c32e1050 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -19,10 +19,11 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 const (
-	defaultFnFormat = `{{.Mime}}/{{.Id}}:{{.Name}}`
+	defaultFnFormat = `{{.Id}}_{{.Name}}{{.Ext}}`
 	timeFormat      = "2006-01-02T15:04:05"
 )
@@ -55,7 +56,7 @@ func init() {
 var (
 	output      = cmdExport.Flag.String("o", "", "output tar file name, must ends with .tar, or just a \"-\" for stdout")
-	format      = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Mime}} {{.Id}} {{.Name}} {{.Ext}}")
+	format      = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Id}} {{.Name}} {{.Ext}}")
 	newer       = cmdExport.Flag.String("newer", "", "export only files newer than this time, default is all files. Must be specified in RFC3339 without timezone, e.g. 2006-01-02T15:04:05")
 	showDeleted = cmdExport.Flag.Bool("deleted", false, "export deleted files. only applies if -o is not specified")
 	limit       = cmdExport.Flag.Int("limit", 0, "only show first n entries if specified")
@@ -69,21 +70,23 @@ var (
 	localLocation, _ = time.LoadLocation("Local")
 )
 
-func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) {
+func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool, offset int64, onDiskSize int64) {
 	key := needle.NewFileIdFromNeedle(vid, n).String()
-	size := n.DataSize
+	size := int32(n.DataSize)
 	if version == needle.Version1 {
-		size = n.Size
+		size = int32(n.Size)
 	}
-	fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n",
+	fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\t%d\t%d\n",
 		key,
 		n.Name,
 		size,
-		n.IsGzipped(),
+		n.IsCompressed(),
 		n.Mime,
 		n.LastModifiedString(),
 		n.Ttl.String(),
 		deleted,
+		offset,
+		offset+onDiskSize,
 	)
 }
@@ -108,9 +111,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 	vid := scanner.vid
 
 	nv, ok := needleMap.Get(n.Id)
-	glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v",
-		n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv)
-	if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset {
+	glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
+		n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
+	if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
 		if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
 			glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
 				n.LastModified, newerThanUnix)
@@ -123,17 +126,17 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 		if tarOutputFile != nil {
 			return writeFile(vid, n)
 		} else {
-			printNeedle(vid, n, scanner.version, false)
+			printNeedle(vid, n, scanner.version, false, offset, n.DiskSize(scanner.version))
 			return nil
 		}
 	}
 	if !ok {
 		if *showDeleted && tarOutputFile == nil {
 			if n.DataSize > 0 {
-				printNeedle(vid, n, scanner.version, true)
+				printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
 			} else {
 				n.Name = []byte("*tombstone")
-				printNeedle(vid, n, scanner.version, true)
+				printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
 			}
 		}
 		glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
@@ -195,7 +198,9 @@ func runExport(cmd *Command, args []string) bool {
 	vid := needle.VolumeId(*export.volumeId)
 
 	needleMap := needle_map.NewMemDb()
-	if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil {
+	defer needleMap.Close()
+
+	if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil {
 		glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
 	}
@@ -205,12 +210,12 @@ func runExport(cmd *Command, args []string) bool {
 	}
 
 	if tarOutputFile == nil {
-		fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\n")
+		fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\tstart\tstop\n")
 	}
 
-	err = storage.ScanVolumeFile(*export.dir, *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
+	err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
 	if err != nil && err != io.EOF {
-		glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+		glog.Errorf("Export Volume File [ERROR] %s\n", err)
 	}
```
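The export changes above swap the default `-fileNameFormat` from `{{.Mime}}/{{.Id}}:{{.Name}}` to `{{.Id}}_{{.Name}}{{.Ext}}`, which yields flat, filesystem-safe names. A self-contained sketch of how such a Go `text/template` filename format evaluates; the sample values are made up for illustration:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// the new default format from the diff above
	t := template.Must(template.New("fn").Parse(`{{.Id}}_{{.Name}}{{.Ext}}`))
	entry := struct{ Id, Name, Ext string }{"3,01637037d6", "photo", ".jpg"}
	t.Execute(os.Stdout, entry) // prints: 3,01637037d6_photo.jpg
}
```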
```diff
 	return true
 }
@@ -240,8 +245,11 @@ func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) {
 
 	fileName := fileNameTemplateBuffer.String()
 
-	if n.IsGzipped() && path.Ext(fileName) != ".gz" {
-		fileName = fileName + ".gz"
+	if n.IsCompressed() {
+		if util.IsGzippedContent(n.Data) && path.Ext(fileName) != ".gz" {
+			fileName = fileName + ".gz"
+		}
+		// TODO other compression method
 	}
 
 	tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data))
diff --git a/weed/command/filer.go b/weed/command/filer.go
index b1ceb46f5..a723b4d8a 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -1,58 +1,102 @@
 package command
 
 import (
+	"fmt"
 	"net/http"
+	"os"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/security"
-	"github.com/spf13/viper"
+	"google.golang.org/grpc/reflection"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/server"
+	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"google.golang.org/grpc/reflection"
 )
 
 var (
-	f FilerOptions
+	f                  FilerOptions
+	filerStartS3       *bool
+	filerS3Options     S3Options
+	filerStartWebDav   *bool
+	filerWebDavOptions WebDavOption
+	filerStartIam      *bool
+	filerIamOptions    IamOptions
 )
 
 type FilerOptions struct {
 	masters                 *string
 	ip                      *string
+	bindIp                  *string
 	port                    *int
 	publicPort              *int
 	collection              *string
 	defaultReplicaPlacement *string
-	redirectOnRead          *bool
 	disableDirListing       *bool
 	maxMB                   *int
 	dirListingLimit         *int
 	dataCenter              *string
+	rack                    *string
 	enableNotification      *bool
 	disableHttp             *bool
-
-	// default leveldb directory, used in "weed server" mode
+	cipher                  *bool
+	peers                   *string
+	metricsHttpPort         *int
+	saveToFilerLimit        *int
 	defaultLevelDbDirectory *string
+	concurrentUploadLimitMB *int
 }
 
 func init() {
 	cmdFiler.Run = runFiler // break init cycle
 	f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
-	f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
-	f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
+	f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
+	f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
+	f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to")
 	f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
 	f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
-	f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
-	f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
+	f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
 	f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
-	f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
+	f.maxMB = cmdFiler.Flag.Int("maxMB", 4, "split files larger than the limit")
 	f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
-	f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
+	f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center")
+	f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack")
 	f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
+	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
+	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+	f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
+	f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
+	f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
+
+	// start s3 on filer
+	filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
+	filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port")
+	filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}")
+	filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
+	filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
+	filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
+	filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+
+	// start webdav on filer
+	filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
+	filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
+	filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+	filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files")
+	filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+	filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
+	filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
+	filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
+	filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
+
+	// start iam on filer
+	filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service")
+	filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port")
 }
 
 var cmdFiler = &Command{
@@ -69,7 +113,8 @@ var cmdFiler = &Command{
 	//return a json format subdirectory and files listing
 	GET /path/to/
 
-	The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
+	The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order.
+	If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir".
 
 	The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
@@ -80,6 +125,37 @@ func runFiler(cmd *Command, args []string) bool {
 
 	util.LoadConfiguration("security", false)
 
+	go stats_collect.StartMetricsServer(*f.metricsHttpPort)
+
+	filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
+	startDelay := time.Duration(2)
+	if *filerStartS3 {
+		filerS3Options.filer = &filerAddress
+		go func() {
+			time.Sleep(startDelay * time.Second)
+			filerS3Options.startS3Server()
+		}()
+		startDelay++
+	}
+
+	if *filerStartWebDav {
+		filerWebDavOptions.filer = &filerAddress
+		go func() {
+			time.Sleep(startDelay * time.Second)
+			filerWebDavOptions.startWebDav()
+		}()
+		startDelay++
+	}
+
+	if *filerStartIam {
+		filerIamOptions.filer = &filerAddress
+		filerIamOptions.masters = f.masters
+		go func() {
+			time.Sleep(startDelay * time.Second)
+			filerIamOptions.startIamServer()
+		}()
+	}
+
 	f.startFiler()
 
 	return true
@@ -94,31 +170,38 @@ func (fo *FilerOptions) startFiler() {
 		publicVolumeMux = http.NewServeMux()
 	}
 
-	defaultLevelDbDirectory := "./filerldb2"
-	if fo.defaultLevelDbDirectory != nil {
-		defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2"
+	defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2")
+
+	var peers []string
+	if *fo.peers != "" {
+		peers = strings.Split(*fo.peers, ",")
 	}
 
 	fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
-		Masters:            strings.Split(*fo.masters, ","),
-		Collection:         *fo.collection,
-		DefaultReplication: *fo.defaultReplicaPlacement,
-		RedirectOnRead:     *fo.redirectOnRead,
-		DisableDirListing:  *fo.disableDirListing,
-		MaxMB:              *fo.maxMB,
-		DirListingLimit:    *fo.dirListingLimit,
-		DataCenter:         *fo.dataCenter,
-		DefaultLevelDbDir:  defaultLevelDbDirectory,
-		DisableHttp:        *fo.disableHttp,
-		Port:               *fo.port,
+		Masters:               strings.Split(*fo.masters, ","),
+		Collection:            *fo.collection,
+		DefaultReplication:    *fo.defaultReplicaPlacement,
+		DisableDirListing:     *fo.disableDirListing,
+		MaxMB:                 *fo.maxMB,
+		DirListingLimit:       *fo.dirListingLimit,
+		DataCenter:            *fo.dataCenter,
+		Rack:                  *fo.rack,
+		DefaultLevelDbDir:     defaultLevelDbDirectory,
+		DisableHttp:           *fo.disableHttp,
+		Host:                  *fo.ip,
+		Port:                  uint32(*fo.port),
+		Cipher:                *fo.cipher,
+		SaveToFilerLimit:      int64(*fo.saveToFilerLimit),
+		Filers:                peers,
+		ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
 	})
 	if nfs_err != nil {
 		glog.Fatalf("Filer startup error: %v", nfs_err)
 	}
 
 	if *fo.publicPort != 0 {
-		publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort)
-		glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress)
+		publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
+		glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
 		publicListener, e := util.NewListener(publicListeningAddress, 0)
 		if e != nil {
 			glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
@@ -130,9 +213,9 @@ func (fo *FilerOptions) startFiler() {
 		}()
 	}
 
-	glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port)
+	glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
 	filerListener, e := util.NewListener(
-		*fo.ip+":"+strconv.Itoa(*fo.port),
+		*fo.bindIp+":"+strconv.Itoa(*fo.port),
 		time.Duration(10)*time.Second,
 	)
 	if e != nil {
@@ -141,11 +224,11 @@ func (fo *FilerOptions) startFiler() {
 
 	// starting grpc server
 	grpcPort := *fo.port + 10000
-	grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0)
+	grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
 	if err != nil {
 		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
 	}
-	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer"))
+	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
 	filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
 	reflection.Register(grpcS)
 	go grpcS.Serve(grpcL)
diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go
new file mode 100644
index 000000000..888b46fe7
--- /dev/null
+++ b/weed/command/filer_backup.go
@@ -0,0 +1,157 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/replication/source"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"google.golang.org/grpc"
+	"io"
+	"time"
+)
+
+type FilerBackupOptions struct {
+	isActivePassive *bool
+	filer           *string
+	path            *string
+	debug           *bool
+	proxyByFiler    *bool
+	timeAgo         *time.Duration
+}
+
+var (
+	filerBackupOptions FilerBackupOptions
+)
+
+func init() {
+	cmdFilerBackup.Run = runFilerBackup // break init cycle
+	filerBackupOptions.filer = cmdFilerBackup.Flag.String("filer", "localhost:8888", "filer of one SeaweedFS cluster")
+	filerBackupOptions.path = cmdFilerBackup.Flag.String("filerPath", "/", "directory to sync on filer")
+	filerBackupOptions.proxyByFiler = cmdFilerBackup.Flag.Bool("filerProxy", false, "read and write file chunks by filer instead of volume servers")
+	filerBackupOptions.debug = cmdFilerBackup.Flag.Bool("debug", false, "debug mode to print out received files")
+	filerBackupOptions.timeAgo = cmdFilerBackup.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+}
+
+var cmdFilerBackup = &Command{
+	UsageLine: "filer.backup -filer=<filerHost>:<filerPort> ",
+	Short:     "resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml",
+	Long: `resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml
+
+	filer.backup listens on filer notifications. If any file is updated, it will fetch the updated content,
+	and write to the destination. This is to replace filer.replicate command since additional message queue is not needed.
+
+	If restarted and "-timeAgo" is not set, the synchronization will resume from the previous checkpoints, persisted every minute.
+	A fresh sync will start from the earliest metadata logs. To reset the checkpoints, just set "-timeAgo" to a high value.
```
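The help text above describes the checkpointing rule that `doFilerBackup` (later in this diff) implements. A standalone sketch of that resume logic, assuming a persisted per-sink offset in nanoseconds; the helper is hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

// resumeStart picks the subscription start time for the metadata stream.
func resumeStart(timeAgo time.Duration, checkpointTsNs int64, haveCheckpoint bool) time.Time {
	if timeAgo != 0 {
		return time.Now().Add(-timeAgo) // an explicit -timeAgo overrides any checkpoint
	}
	if haveCheckpoint {
		return time.Unix(0, checkpointTsNs) // resume from the persisted offset
	}
	return time.Unix(0, 0) // fresh sync: start from the earliest metadata logs
}

func main() {
	fmt.Println(resumeStart(0, 0, false)) // 1970-01-01: replay full history
	fmt.Println(resumeStart(10*time.Hour, 0, true))
}
```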
```diff
+
+`,
+}
+
+func runFilerBackup(cmd *Command, args []string) bool {
+
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+	util.LoadConfiguration("security", false)
+	util.LoadConfiguration("replication", true)
+
+	for {
+		err := doFilerBackup(grpcDialOption, &filerBackupOptions)
+		if err != nil {
+			glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
+			time.Sleep(1747 * time.Millisecond)
+		}
+	}
+
+	return true
+}
+
+const (
+	BackupKeyPrefix = "backup."
+)
+
+func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOptions) error {
+
+	// find data sink
+	config := util.GetViper()
+	dataSink := findSink(config)
+	if dataSink == nil {
+		return fmt.Errorf("no data sink configured in replication.toml")
+	}
+
+	sourceFiler := *backupOption.filer
+	sourcePath := *backupOption.path
+	timeAgo := *backupOption.timeAgo
+	targetPath := dataSink.GetSinkToDirectory()
+	debug := *backupOption.debug
+
+	// get start time for the data sink
+	startFrom := time.Unix(0, 0)
+	sinkId := util.HashStringToLong(dataSink.GetName() + dataSink.GetSinkToDirectory())
+	if timeAgo.Milliseconds() == 0 {
+		lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId))
+		if err != nil {
+			glog.V(0).Infof("starting from %v", startFrom)
+		} else {
+			startFrom = time.Unix(0, lastOffsetTsNs)
+			glog.V(0).Infof("resuming from %v", startFrom)
+		}
+	} else {
+		startFrom = time.Now().Add(-timeAgo)
+		glog.V(0).Infof("start time is set to %v", startFrom)
+	}
+
+	// create filer sink
+	filerSource := &source.FilerSource{}
+	filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, *backupOption.proxyByFiler)
+	dataSink.SetSourceFiler(filerSource)
+
+	processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
+
+	return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+			ClientName: "backup_" + dataSink.GetName(),
+			PathPrefix: sourcePath,
+			SinceNs:    startFrom.UnixNano(),
+		})
+		if err != nil {
+			return fmt.Errorf("listen: %v", err)
+		}
+
+		var counter int64
+		var lastWriteTime time.Time
+		for {
+			resp, listenErr := stream.Recv()
+
+			if listenErr == io.EOF {
+				return nil
+			}
+			if listenErr != nil {
+				return listenErr
+			}
+
+			if err := processEventFn(resp); err != nil {
+				return fmt.Errorf("processEventFn: %v", err)
+			}
+
+			counter++
+			if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
+				glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
+				counter = 0
+				lastWriteTime = time.Now()
+				if err := setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), resp.TsNs); err != nil {
+					return fmt.Errorf("setOffset: %v", err)
+				}
+			}
+
+		}
+
+	})
+
+}
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
new file mode 100644
index 000000000..c4281feba
--- /dev/null
+++ b/weed/command/filer_cat.go
@@ -0,0 +1,118 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+	"google.golang.org/grpc"
+	"math"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+	filerCat FilerCatOptions
+)
+
+type FilerCatOptions struct {
+	grpcDialOption grpc.DialOption
+	filerAddress   string
+	filerClient    filer_pb.SeaweedFilerClient
+	output         *string
+}
+
+func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
+	return func(fileId string) (targetUrls []string, err error) {
+		vid := filer.VolumeId(fileId)
+		resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+			VolumeIds: []string{vid},
+		})
+		if err != nil {
+			return nil, err
+		}
+		locations := resp.LocationsMap[vid]
+		for _, loc := range locations.Locations {
+			targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
+		}
+		return
+	}
+}
+
+func init() {
+	cmdFilerCat.Run = runFilerCat // break init cycle
+	filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
+}
+
+var cmdFilerCat = &Command{
+	UsageLine: "filer.cat [-o <file>] http://localhost:8888/path/to/file",
+	Short:     "copy one file to local",
+	Long: `read one file to stdout or write to a file
+
+`,
+}
+
+func runFilerCat(cmd *Command, args []string) bool {
+
+	util.LoadConfiguration("security", false)
+
+	if len(args) == 0 {
+		return false
+	}
+	filerSource := args[len(args)-1]
+
+	filerUrl, err := url.Parse(filerSource)
+	if err != nil {
+		fmt.Printf("The last argument should be a URL on filer: %v\n", err)
+		return false
+	}
+	urlPath := filerUrl.Path
+	if strings.HasSuffix(urlPath, "/") {
+		fmt.Printf("The last argument should be a file: %v\n", err)
+		return false
+	}
+
+	filerCat.filerAddress = filerUrl.Host
+	filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+	dir, name := util.FullPath(urlPath).DirAndName()
+
+	writer := os.Stdout
+	if *filerCat.output != "" {
+
+		fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)
+
+		f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
+		if err != nil {
+			fmt.Printf("open file %s: %v\n", *filerCat.output, err)
+			return false
+		}
+		defer f.Close()
+		writer = f
+	}
+
+	pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+		request := &filer_pb.LookupDirectoryEntryRequest{
+			Name:      name,
+			Directory: dir,
+		}
+		respLookupEntry, err := filer_pb.LookupEntry(client, request)
+		if err != nil {
+			return err
+		}
+
+		filerCat.filerClient = client
+
+		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+
+	})
+
+	return true
+}
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index f14d18c52..e7a9b107f 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -14,13 +14,17 @@ import (
 	"sync"
 	"time"
 
+	"google.golang.org/grpc"
+
+	"github.com/chrislusf/seaweedfs/weed/util/grace"
+
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
-	"github.com/spf13/viper"
-	"google.golang.org/grpc"
 )
 
 var (
@@ -33,13 +37,15 @@ type CopyOptions struct {
 	replication       *string
 	collection        *string
 	ttl               *string
+	diskType          *string
 	maxMB             *int
 	masterClient      *wdclient.MasterClient
 	concurrenctFiles  *int
 	concurrenctChunks *int
-	compressionLevel  *int
 	grpcDialOption    grpc.DialOption
 	masters           []string
+	cipher            bool
+	ttlSec            int32
 }
 
 func init() {
@@ -49,10 +55,10 @@ func init() {
 	copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
 	copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
 	copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
-	copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
+	copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
+	copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
 	copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
 	copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
-	copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9")
 }
 
 var cmdCopy = &Command{
@@ -68,7 +74,7 @@ var cmdCopy = &Command{
 
 	If "maxMB" is set to a positive number, files larger than it would be split into chunks.
 
-  `,
+`,
 }
 
 func runCopy(cmd *Command, args []string) bool {
@@ -88,7 +94,7 @@ func runCopy(cmd *Command, args []string) bool {
 	}
 	urlPath := filerUrl.Path
 	if !strings.HasSuffix(urlPath, "/") {
-		fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
+		fmt.Printf("The last argument should be a folder and end with \"/\"\n")
 		return false
 	}
 
@@ -105,15 +111,25 @@ func runCopy(cmd *Command, args []string) bool {
 	filerGrpcPort := filerPort + 10000
 	filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
-	copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
-
-	ctx := context.Background()
+	copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
 
-	masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress)
+	masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
 	if err != nil {
 		fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
 		return false
 	}
+	if strings.HasPrefix(urlPath, dirBuckets+"/") {
+		restPath := urlPath[len(dirBuckets)+1:]
+		if strings.Index(restPath, "/") > 0 {
+			expectedBucket := restPath[:strings.Index(restPath, "/")]
+			if *copy.collection == "" {
+				*copy.collection = expectedBucket
+			} else if *copy.collection != expectedBucket {
+				fmt.Printf("destination %s uses collection \"%s\": unexpected collection \"%v\"\n", urlPath, expectedBucket, *copy.collection)
+				return true
+			}
+		}
+	}
 	if *copy.collection == "" {
 		*copy.collection = collection
 	}
@@ -124,13 +140,17 @@ func runCopy(cmd *Command, args []string) bool {
 		*copy.maxMB = int(maxMB)
 	}
 	copy.masters = masters
+	copy.cipher = cipher
 
-	copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters)
-	go copy.masterClient.KeepConnectedToMaster()
-	copy.masterClient.WaitUntilConnected()
+	ttl, err := needle.ReadTTL(*copy.ttl)
+	if err != nil {
+		fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err)
+		return false
+	}
+	copy.ttlSec = int32(ttl.Minutes()) * 60
 
 	if *cmdCopy.IsDebug {
-		util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
+		grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
 	}
 
 	fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles)
@@ -139,7 +159,7 @@ func runCopy(cmd *Command, args []string) bool {
 		defer close(fileCopyTaskChan)
 		for _, fileOrDir := range fileOrDirs {
 			if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil {
-				fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err)
+				fmt.Fprintf(os.Stderr, "genFileCopyTask : %v\n", err)
 				break
 			}
 		}
@@ -153,7 +173,7 @@ func runCopy(cmd *Command, args []string) bool {
 				filerHost:        filerUrl.Host,
 				filerGrpcAddress: filerGrpcAddress,
 			}
-			if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil {
+			if err := worker.copyFiles(fileCopyTaskChan); err != nil {
 				fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
 				return
 			}
@@ -164,13 +184,15 @@ func runCopy(cmd *Command, args []string) bool {
 	return true
 }
 
-func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
-	err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
-		resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
+func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
+	err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 		if err != nil {
 			return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
 		}
 		masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
+		dirBuckets = resp.DirBuckets
+		cipher = resp.Cipher
 		return nil
 	})
 	return
@@ -180,21 +202,11 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
 
 	fi, err := os.Stat(fileOrDir)
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err)
+		fmt.Fprintf(os.Stderr, "Error: read file %s: %v\n", fileOrDir, err)
 		return nil
 	}
 
 	mode := fi.Mode()
-	if mode.IsDir() {
-		files, _ := ioutil.ReadDir(fileOrDir)
-		for _, subFileOrDir := range files {
-			if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
 	uid, gid := util.GetFileUidGid(fi)
 
 	fileCopyTaskChan <- FileCopyTask{
@@ -206,6 +218,16 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
 		gid:                gid,
 	}
 
+	if mode.IsDir() {
+		files, _ := ioutil.ReadDir(fileOrDir)
+		println("checking directory", fileOrDir)
+		for _, subFileOrDir := range files {
+			if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
+				return err
+			}
+		}
+	}
+
 	return nil
 }
 
@@ -215,9 +237,9 @@ type FileCopyWorker struct {
 	filerGrpcAddress string
 }
 
-func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error {
+func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
 	for task := range fileCopyTaskChan {
-		if err := worker.doEachCopy(ctx, task); err != nil {
+		if err := worker.doEachCopy(task); err != nil {
 			return err
 		}
 	}
@@ -233,7 +255,7 @@ type FileCopyTask struct {
 	gid                uint32
 }
 
-func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error {
+func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
 
 	f, err := os.Open(task.sourceLocation)
 	if err != nil {
@@ -261,36 +283,58 @@ func (worker *FileCopyWorker) doEachCopy(task FileCopyTask)
 	}
 
 	if chunkCount == 1 {
-		return worker.uploadFileAsOne(ctx, task, f)
+		return worker.uploadFileAsOne(task, f)
 	}
 
-	return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize)
+	return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
 }
 
-func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error {
+func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {
 
 	// upload the file content
 	fileName := filepath.Base(f.Name())
-	mimeType := detectMimeType(f)
+	var mimeType string
 
 	var chunks []*filer_pb.FileChunk
+	var assignResult *filer_pb.AssignVolumeResponse
+	var assignError error
 
-	if task.fileSize > 0 {
+	if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {
+
+		mimeType = detectMimeType(f)
+		data, err := ioutil.ReadAll(f)
+		if err != nil {
+			return err
+		}
 
 		// assign a volume
-		assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
-			Count:       1,
-			Replication: *worker.options.replication,
-			Collection:  *worker.options.collection,
-			Ttl:         *worker.options.ttl,
+		err = pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+			request := &filer_pb.AssignVolumeRequest{
+				Count:       1,
+				Replication: *worker.options.replication,
+				Collection:  *worker.options.collection,
+				TtlSec:      worker.options.ttlSec,
+				DiskType:    *worker.options.diskType,
+				Path:        task.destinationUrlPath,
+			}
+
+			assignResult, assignError = client.AssignVolume(context.Background(), request)
+			if assignError != nil {
+				return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+			}
+			if assignResult.Error != "" {
+				return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+			}
+			return nil
 		})
 		if err != nil {
-			fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
+			return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
 		}
 
-		targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+		targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
 
-		uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel)
+		uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
 		if err != nil {
 			return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
 		}
@@ -299,18 +343,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File)
 		}
 		fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
 
-		chunks = append(chunks, &filer_pb.FileChunk{
-			FileId: assignResult.Fid,
-			Offset: 0,
-			Size:   uint64(uploadResult.Size),
-			Mtime:  time.Now().UnixNano(),
-			ETag:   uploadResult.ETag,
-		})
+		chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
 
 		fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
 	}
 
-	if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: task.destinationUrlPath,
 			Entry: &filer_pb.Entry{
@@ -325,13 +363,13 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
 					Mime:        mimeType,
 					Replication: *worker.options.replication,
 					Collection:  *worker.options.collection,
-					TtlSec:      int32(util.ParseInt(*worker.options.ttl, 0)),
+					TtlSec:      worker.options.ttlSec,
 				},
 				Chunks: chunks,
 			},
 		}
 
-		if _, err := client.CreateEntry(ctx, request); err != nil {
+		if err := filer_pb.CreateEntry(client, request); err != nil {
 			return fmt.Errorf("update fh: %v", err)
 		}
 		return nil
@@ -342,7 +380,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File)
 	return nil
 }
 
-func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
+func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
 
 	fileName := filepath.Base(f.Name())
 	mimeType := detectMimeType(f)
@@ -352,6 +390,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask,
 	concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
 	var wg sync.WaitGroup
 	var uploadError error
+	var collection, replication string
 
 	fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
 	for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
@@ -363,22 +402,43 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask,
 				<-concurrentChunks
 			}()
 			// assign a volume
-			assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
-				Count:       1,
-				Replication: *worker.options.replication,
-				Collection:  *worker.options.collection,
-				Ttl:         *worker.options.ttl,
+			var assignResult *filer_pb.AssignVolumeResponse
+			var assignError error
+			err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+				request := &filer_pb.AssignVolumeRequest{
+					Count:       1,
+					Replication: *worker.options.replication,
+					Collection:  *worker.options.collection,
+					TtlSec:      worker.options.ttlSec,
+					DiskType:    *worker.options.diskType,
+					Path:        task.destinationUrlPath + fileName,
+				}
+
+				assignResult, assignError = client.AssignVolume(context.Background(), request)
+				if assignError != nil {
+					return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+				}
+				if assignResult.Error != "" {
+					return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+				}
+				return nil
+			})
 			if err != nil {
 				fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
 			}
+			if err != nil {
+				fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
+			}
 
-			targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+			targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
+			if collection == "" {
+				collection = assignResult.Collection
+			}
+			if replication == "" {
+				replication = assignResult.Replication
+			}
 
-			uploadResult, err := operation.Upload(targetUrl,
-				fileName+"-"+strconv.FormatInt(i+1, 10),
-				io.NewSectionReader(f, i*chunkSize, chunkSize),
-				false, "", nil, assignResult.Auth)
+			uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
 			if err != nil {
 				uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
 				return
@@ -387,13 +447,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
 				uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
 				return
 			}
-			chunksChan <- &filer_pb.FileChunk{
-				FileId: assignResult.Fid,
-				Offset: i * chunkSize,
-				Size:   uint64(uploadResult.Size),
-				Mtime:  time.Now().UnixNano(),
-				ETag:   uploadResult.ETag,
-			}
+			chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)
+
 			fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
 		}(i)
 	}
@@ -410,11 +465,13 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask,
 		for _, chunk := range chunks {
 			fileIds = append(fileIds, chunk.FileId)
 		}
-		operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds)
+		operation.DeleteFiles(func() string {
+			return copy.masters[0]
+		}, false, worker.options.grpcDialOption, fileIds)
 		return uploadError
 	}
 
-	if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: task.destinationUrlPath,
 			Entry: &filer_pb.Entry{
@@ -427,15 +484,15 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
 					FileSize:    uint64(task.fileSize),
 					FileMode:    uint32(task.fileMode),
 					Mime:        mimeType,
-					Replication: *worker.options.replication,
-					Collection:  *worker.options.collection,
-					TtlSec:      int32(util.ParseInt(*worker.options.ttl, 0)),
+					Replication: replication,
+					Collection:  collection,
+					TtlSec:      worker.options.ttlSec,
 				},
 				Chunks: chunks,
 			},
 		}
 
-		if _, err := client.CreateEntry(ctx, request); err != nil {
+		if err := filer_pb.CreateEntry(client, request); err != nil {
 			return fmt.Errorf("update fh: %v", err)
 		}
 		return nil
@@ -457,18 +514,12 @@ func detectMimeType(f *os.File) string {
 	}
 	if err != nil {
 		fmt.Printf("read head of %v: %v\n", f.Name(), err)
-		return "application/octet-stream"
+		return ""
 	}
 	f.Seek(0, io.SeekStart)
 	mimeType := http.DetectContentType(head[:n])
+	if mimeType == "application/octet-stream" {
+		return ""
+	}
 	return mimeType
 }
-
-func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
-
-	return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error {
-		client := filer_pb.NewSeaweedFilerClient(clientConn)
-		return fn(client)
-	}, filerAddress, grpcDialOption)
-
-}
diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go
new file mode 100644
index 000000000..ba0b44659
--- /dev/null
+++ b/weed/command/filer_meta_backup.go
@@ -0,0 +1,268 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/spf13/viper"
+	"google.golang.org/grpc"
+	"io"
+	"reflect"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+	metaBackup FilerMetaBackupOptions
+)
+
+type FilerMetaBackupOptions struct {
+	grpcDialOption    grpc.DialOption
+	filerAddress      *string
+	filerDirectory    *string
+	restart           *bool
+	backupFilerConfig *string
+
+	store filer.FilerStore
+}
+
+func init() {
+	cmdFilerMetaBackup.Run = runFilerMetaBackup // break init cycle
```
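In the `filer_copy.go` changes above, `uploadFileInChunks` reads chunk `i` with `io.NewSectionReader(f, i*chunkSize, chunkSize)` and records it at offset `i*chunkSize`. A runnable sketch of that tiling, with a `strings.Reader` standing in for the file:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// chunk i covers the half-open byte range [i*chunkSize, (i+1)*chunkSize),
	// with the final chunk clipped at end-of-file by the SectionReader
	data := strings.NewReader("0123456789abcdef") // any io.ReaderAt works, like *os.File
	const chunkSize = 5
	for i := int64(0); i*chunkSize < 16; i++ {
		section := io.NewSectionReader(data, i*chunkSize, chunkSize)
		buf, _ := io.ReadAll(section)
		fmt.Printf("chunk %d at offset %d: %q\n", i, i*chunkSize, buf)
	}
}
```

Because each goroutine reads through its own `SectionReader` via `ReadAt`, the chunks can be uploaded concurrently without sharing a file cursor, which is what makes the `concurrentChunks` fan-out in the diff safe.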
+	metaBackup.filerAddress = cmdFilerMetaBackup.Flag.String("filer", "localhost:8888", "filer hostname:port")
+	metaBackup.filerDirectory = cmdFilerMetaBackup.Flag.String("filerDir", "/", "a folder on the filer")
+	metaBackup.restart = cmdFilerMetaBackup.Flag.Bool("restart", false, "copy the full metadata before async incremental backup")
+	metaBackup.backupFilerConfig = cmdFilerMetaBackup.Flag.String("config", "", "path to filer.toml specifying backup filer store")
+}
+
+var cmdFilerMetaBackup = &Command{
+	UsageLine: "filer.meta.backup [-filer=localhost:8888] [-filerDir=/] [-restart] -config=/path/to/backup_filer.toml",
+	Short:     "continuously back up filer metadata changes to another filer store specified in a backup_filer.toml",
+	Long: `continuously back up filer metadata changes.
+The backup writes to another filer store specified in a backup_filer.toml.
+
+	weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888"
+	weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888" -restart
+
+  `,
+}
+
+func runFilerMetaBackup(cmd *Command, args []string) bool {
+
+	metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+	// load backup_filer.toml
+	v := viper.New()
+	v.SetConfigFile(*metaBackup.backupFilerConfig)
+
+	if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file
+		glog.Fatalf("Failed to load %s file.\nPlease use this command to generate a %s.toml file\n"+
+			"    weed scaffold -config=%s -output=.\n\n\n",
+			*metaBackup.backupFilerConfig, "backup_filer", "filer")
+	}
+
+	if err := metaBackup.initStore(v); err != nil {
+		glog.V(0).Infof("init backup filer store: %v", err)
+		return true
+	}
+
+	missingPreviousBackup := false
+	_, err := metaBackup.getOffset()
+	if err != nil {
+		missingPreviousBackup = true
+	}
+
+	if *metaBackup.restart || missingPreviousBackup {
+		glog.V(0).Infof("traversing metadata tree...")
+		startTime := time.Now()
+		if err := metaBackup.traverseMetadata(); err != nil {
+			glog.Errorf("traverse meta data: %v", err)
+			return true
+		}
+		glog.V(0).Infof("metadata copied up to %v", startTime)
+		if err := metaBackup.setOffset(startTime); err != nil {
+			startTime = time.Now()
+		}
+	}
+
+	for {
+		err := metaBackup.streamMetadataBackup()
+		if err != nil {
+			glog.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err)
+			time.Sleep(1747 * time.Millisecond)
+		}
+	}
+
+	return true
+}
+
+func (metaBackup *FilerMetaBackupOptions) initStore(v *viper.Viper) error {
+	// load configuration for default filer store
+	hasDefaultStoreConfigured := false
+	for _, store := range filer.Stores {
+		if v.GetBool(store.GetName() + ".enabled") {
+			store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(filer.FilerStore)
+			if err := store.Initialize(v, store.GetName()+"."); err != nil {
+				glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
+			}
+			glog.V(0).Infof("configured filer store to %s", store.GetName())
+			hasDefaultStoreConfigured = true
+			metaBackup.store = filer.NewFilerStoreWrapper(store)
+			break
+		}
+	}
+	if !hasDefaultStoreConfigured {
+		return fmt.Errorf("no filer store enabled in %s", v.ConfigFileUsed())
+	}
+
+	return nil
+}
+
+func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) {
+	var saveErr error
+
+	traverseErr := filer_pb.TraverseBfs(metaBackup, util.FullPath(*metaBackup.filerDirectory), func(parentPath util.FullPath, entry *filer_pb.Entry) {
+
+		println("+", parentPath.Child(entry.Name))
+		if err
:= metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil { + saveErr = fmt.Errorf("insert entry error: %v\n", err) + return + } + + }) + + if traverseErr != nil { + return fmt.Errorf("traverse: %v", traverseErr) + } + return saveErr +} + +var ( + MetaBackupKey = []byte("metaBackup") +) + +func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error { + + startTime, err := metaBackup.getOffset() + if err != nil { + startTime = time.Now() + } + glog.V(0).Infof("streaming from %v", startTime) + + store := metaBackup.store + + eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error { + + ctx := context.Background() + message := resp.EventNotification + + if message.OldEntry == nil && message.NewEntry == nil { + return nil + } + if message.OldEntry == nil && message.NewEntry != nil { + println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry) + return store.InsertEntry(ctx, entry) + } + if message.OldEntry != nil && message.NewEntry == nil { + println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + return store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + } + if message.OldEntry != nil && message.NewEntry != nil { + if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name { + println("~", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry) + return store.UpdateEntry(ctx, entry) + } + println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + if err := store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)); err != nil { + return err + } + println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + return store.InsertEntry(ctx, filer.FromPbEntry(message.NewParentPath, message.NewEntry)) + } + + return nil + } + + tailErr := pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "meta_backup", + PathPrefix: *metaBackup.filerDirectory, + SinceNs: startTime.UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + var counter int64 + var lastWriteTime time.Time + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + if err = eachEntryFunc(resp); err != nil { + return err + } + + counter++ + if lastWriteTime.Add(3 * time.Second).Before(time.Now()) { + glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, time.Unix(0, resp.TsNs), float64(counter)/float64(3)) + counter = 0 + lastWriteTime = time.Now() + if err2 := metaBackup.setOffset(lastWriteTime); err2 != nil { + return err2 + } + } + + } + + }) + return tailErr +} + +func (metaBackup *FilerMetaBackupOptions) getOffset() (lastWriteTime time.Time, err error) { + value, err := metaBackup.store.KvGet(context.Background(), MetaBackupKey) + if err != nil { + return + } + tsNs := util.BytesToUint64(value) + + return time.Unix(0, int64(tsNs)), nil +} + +func (metaBackup *FilerMetaBackupOptions) setOffset(lastWriteTime time.Time) error { + valueBuf := make([]byte, 8) + 
util.Uint64toBytes(valueBuf, uint64(lastWriteTime.UnixNano())) + + if err := metaBackup.store.KvPut(context.Background(), MetaBackupKey, valueBuf); err != nil { + return err + } + return nil +} + +var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{}) + +func (metaBackup *FilerMetaBackupOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return fn(client) + }) + +} + +func (metaBackup *FilerMetaBackupOptions) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go new file mode 100644 index 000000000..8451ffd78 --- /dev/null +++ b/weed/command/filer_meta_tail.go @@ -0,0 +1,211 @@ +package command + +import ( + "context" + "fmt" + "github.com/golang/protobuf/jsonpb" + jsoniter "github.com/json-iterator/go" + "github.com/olivere/elastic/v7" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle +} + +var cmdFilerMetaTail = &Command{ + UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]", + Short: "see continuous changes on a filer", + Long: `See continuous changes on a filer. + + weed filer.meta.tail -timeAgo=30h | grep truncate + weed filer.meta.tail -timeAgo=30h | jq . + weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name + + `, +} + +var ( + tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port") + tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or common prefix for the folders or files on filer") + tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". 
Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"") + tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ") + esServers = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://<host:port>") + esIndex = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name") +) + +func runFilerMetaTail(cmd *Command, args []string) bool { + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var filterFunc func(dir, fname string) bool + if *tailPattern != "" { + if strings.Contains(*tailPattern, "/") { + println("watch path pattern", *tailPattern) + filterFunc = func(dir, fname string) bool { + matched, err := filepath.Match(*tailPattern, dir+"/"+fname) + if err != nil { + fmt.Printf("error: %v", err) + } + return matched + } + } else { + println("watch file pattern", *tailPattern) + filterFunc = func(dir, fname string) bool { + matched, err := filepath.Match(*tailPattern, fname) + if err != nil { + fmt.Printf("error: %v", err) + } + return matched + } + } + } + + shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool { + if filterFunc == nil { + return true + } + if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil { + return false + } + if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) { + return true + } + if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) { + return true + } + return false + } + + jsonpbMarshaler := jsonpb.Marshaler{ + EmitDefaults: false, + } + eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error { + jsonpbMarshaler.Marshal(os.Stdout, resp) + fmt.Fprintln(os.Stdout) + return nil + } + if *esServers != "" { + var err error + eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex) + if err != nil { + fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err) + return false + } + } + + tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "tail", + PathPrefix: *tailTarget, + SinceNs: time.Now().Add(-*tailStart).UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + if !shouldPrint(resp) { + continue + } + if err = eachEntryFunc(resp); err != nil { + return err + } + } + + }) + if tailErr != nil { + fmt.Printf("tail %s: %v\n", *tailFiler, tailErr) + } + + return true +} + +type EsDocument struct { + Dir string `json:"dir,omitempty"` + Name string `json:"name,omitempty"` + IsDirectory bool `json:"isDir,omitempty"` + Size uint64 `json:"size,omitempty"` + Uid uint32 `json:"uid,omitempty"` + Gid uint32 `json:"gid,omitempty"` + UserName string `json:"userName,omitempty"` + Collection string `json:"collection,omitempty"` + Crtime int64 `json:"crtime,omitempty"` + Mtime int64 `json:"mtime,omitempty"` + Mime string `json:"mime,omitempty"` +} + +func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) { + entry := event.NewEntry + dir, name := event.NewParentPath, 
entry.Name + id := util.Md5String([]byte(util.NewFullPath(dir, name))) + esEntry := &EsDocument{ + Dir: dir, + Name: name, + IsDirectory: entry.IsDirectory, + Size: entry.Attributes.FileSize, + Uid: entry.Attributes.Uid, + Gid: entry.Attributes.Gid, + UserName: entry.Attributes.UserName, + Collection: entry.Attributes.Collection, + Crtime: entry.Attributes.Crtime, + Mtime: entry.Attributes.Mtime, + Mime: entry.Attributes.Mime, + } + return esEntry, id +} + +func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) { + options := []elastic.ClientOptionFunc{} + options = append(options, elastic.SetURL(strings.Split(servers, ",")...)) + options = append(options, elastic.SetSniff(false)) + options = append(options, elastic.SetHealthcheck(false)) + client, err := elastic.NewClient(options...) + if err != nil { + return nil, err + } + return func(resp *filer_pb.SubscribeMetadataResponse) error { + event := resp.EventNotification + if event.OldEntry != nil && + (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) { + // delete or not update the same file + dir, name := resp.Directory, event.OldEntry.Name + id := util.Md5String([]byte(util.NewFullPath(dir, name))) + println("delete", id) + _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background()) + return err + } + if event.NewEntry != nil { + // add a new file or update the same file + esEntry, id := toEsEntry(event) + value, err := jsoniter.Marshal(esEntry) + if err != nil { + return err + } + println(string(value)) + _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background()) + return err + } + return nil + }, nil +} diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c6e7f5dba..885c95540 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -11,10 +11,10 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink" "github.com/chrislusf/seaweedfs/weed/replication/sub" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) func init() { @@ -39,7 +39,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) util.LoadConfiguration("notification", true) - config := viper.GetViper() + config := util.GetViper() var notificationInput sub.NotificationInput @@ -47,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { for _, input := range sub.NotificationInputs { if config.GetBool("notification." + input.GetName() + ".enabled") { - viperSub := config.Sub("notification." 
+ input.GetName()) - if err := input.Initialize(viperSub); err != nil { + if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification input for %s: %+v", input.GetName(), err) } @@ -66,29 +65,16 @@ func runFilerReplicate(cmd *Command, args []string) bool { // avoid recursive replication if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") { - sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer") - if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") { - fromDir := sourceConfig.GetString("directory") - toDir := sinkConfig.GetString("directory") + if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") { + fromDir := config.GetString("source.filer.directory") + toDir := config.GetString("sink.filer.directory") if strings.HasPrefix(toDir, fromDir) { glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir) } } } - var dataSink sink.ReplicationSink - for _, sk := range sink.Sinks { - if config.GetBool("sink." + sk.GetName() + ".enabled") { - viperSub := config.Sub("sink." + sk.GetName()) - if err := sk.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize sink for %s: %+v", - sk.GetName(), err) - } - glog.V(0).Infof("Configure sink to %s", sk.GetName()) - dataSink = sk - break - } - } + dataSink := findSink(config) if dataSink == nil { println("no data sink configured in replication.toml:") @@ -98,16 +84,22 @@ func runFilerReplicate(cmd *Command, args []string) bool { return true } - replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink) + replicator := replication.NewReplicator(config, "source.filer.", dataSink) for { - key, m, err := notificationInput.ReceiveMessage() + key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage() if err != nil { glog.Errorf("receive %s: %+v", key, err) + if onFailureFn != nil { + onFailureFn() + } continue } if key == "" { // long poll received no messages + if onSuccessFn != nil { + onSuccessFn() + } continue } if m.OldEntry != nil && m.NewEntry == nil { @@ -119,15 +111,36 @@ func runFilerReplicate(cmd *Command, args []string) bool { } if err = replicator.Replicate(context.Background(), key, m); err != nil { glog.Errorf("replicate %s: %+v", key, err) + if onFailureFn != nil { + onFailureFn() + } } else { glog.V(1).Infof("replicated %s", key) + if onSuccessFn != nil { + onSuccessFn() + } } } - return true } -func validateOneEnabledInput(config *viper.Viper) { +func findSink(config *util.ViperProxy) sink.ReplicationSink { + var dataSink sink.ReplicationSink + for _, sk := range sink.Sinks { + if config.GetBool("sink." + sk.GetName() + ".enabled") { + if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil { + glog.Fatalf("Failed to initialize sink for %s: %+v", + sk.GetName(), err) + } + glog.V(0).Infof("Configure sink to %s", sk.GetName()) + dataSink = sk + break + } + } + return dataSink +} + +func validateOneEnabledInput(config *util.ViperProxy) { enabledInput := "" for _, input := range sub.NotificationInputs { if config.GetBool("notification." 
+ input.GetName() + ".enabled") { diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go new file mode 100644 index 000000000..0f34e5701 --- /dev/null +++ b/weed/command/filer_sync.go @@ -0,0 +1,374 @@ +package command + +import ( + "context" + "errors" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" + "google.golang.org/grpc" + "io" + "strings" + "time" +) + +type SyncOptions struct { + isActivePassive *bool + filerA *string + filerB *string + aPath *string + bPath *string + aReplication *string + bReplication *string + aCollection *string + bCollection *string + aTtlSec *int + bTtlSec *int + aDiskType *string + bDiskType *string + aDebug *bool + bDebug *bool + aProxyByFiler *bool + bProxyByFiler *bool +} + +var ( + syncOptions SyncOptions + syncCpuProfile *string + syncMemProfile *string +) + +func init() { + cmdFilerSynchronize.Run = runFilerSynchronize // break init cycle + syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow from A to B if true") + syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster") + syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster") + syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A") + syncOptions.bPath = cmdFilerSynchronize.Flag.String("b.path", "/", "directory to sync on filer B") + syncOptions.aReplication = cmdFilerSynchronize.Flag.String("a.replication", "", "replication on filer A") + syncOptions.bReplication = cmdFilerSynchronize.Flag.String("b.replication", "", "replication on filer B") + syncOptions.aCollection = cmdFilerSynchronize.Flag.String("a.collection", "", "collection on filer A") + syncOptions.bCollection = cmdFilerSynchronize.Flag.String("b.collection", "", "collection on filer B") + syncOptions.aTtlSec = cmdFilerSynchronize.Flag.Int("a.ttlSec", 0, "ttl in seconds on filer A") + syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B") + syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag on filer A") + syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag on filer B") + syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers") + syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers") + syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files") + syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files") + syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file") + syncMemProfile = cmdFilerSynchronize.Flag.String("memprofile", "", "memory 
profile output file")
+}
+
+var cmdFilerSynchronize = &Command{
+	UsageLine: "filer.sync -a=<oneFilerHost>:<oneFilerPort> -b=<otherFilerHost>:<otherFilerPort>",
+	Short:     "resumable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
+	Long: `resumable continuous synchronization for file changes between two active-active or active-passive filers
+
+	filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
+	and write to the other destination. Different from filer.replicate:
+
+	* filer.sync only works between two filers.
+	* filer.sync does not need any special message queue setup.
+	* filer.sync supports both active-active and active-passive modes.
+
+	If restarted, the synchronization will resume from the previous checkpoints, persisted every minute.
+	A fresh sync will start from the earliest metadata logs.
+
+`,
+}
+
+func runFilerSynchronize(cmd *Command, args []string) bool {
+
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+	grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
+
+	go func() {
+		for {
+			err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
+				*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
+			if err != nil {
+				glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
+				time.Sleep(1747 * time.Millisecond)
+			}
+		}
+	}()
+
+	if !*syncOptions.isActivePassive {
+		go func() {
+			for {
+				err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
+					*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
+				if err != nil {
+					glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
+					time.Sleep(2147 * time.Millisecond)
+				}
+			}
+		}()
+	}
+
+	select {}
+
+	return true
+}
+
+func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
+	replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
+
+	// read source filer signature
+	sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
+	if sourceErr != nil {
+		return sourceErr
+	}
+	// read target filer signature
+	targetFilerSignature, targetErr := replication.ReadFilerSignature(grpcDialOption, targetFiler)
+	if targetErr != nil {
+		return targetErr
+	}
+
+	// if first time, start from now
+	// if it has previously synced, resume from that point in time
+	sourceFilerOffsetTsNs, err := getOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature)
+	if err != nil {
+		return err
+	}
+
+	glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
+
+	// create filer sink
+	filerSource := &source.FilerSource{}
+	filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
+	filerSink := &filersink.FilerSink{}
+	filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath,
replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler) + filerSink.SetSourceFiler(filerSource) + + persistEventFn := genProcessFunction(sourcePath, targetPath, filerSink, debug) + + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + for _, sig := range message.Signatures { + if sig == targetFilerSignature && targetFilerSignature != 0 { + fmt.Printf("%s skipping %s change to %v\n", targetFiler, sourceFiler, message) + return nil + } + } + return persistEventFn(resp) + } + + return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "syncTo_" + targetFiler, + PathPrefix: sourcePath, + SinceNs: sourceFilerOffsetTsNs, + Signature: targetFilerSignature, + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + var counter int64 + var lastWriteTime time.Time + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + return err + } + + counter++ + if lastWriteTime.Add(3 * time.Second).Before(time.Now()) { + glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3)) + counter = 0 + lastWriteTime = time.Now() + if err := setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, resp.TsNs); err != nil { + return err + } + } + + } + + }) + +} + +const ( + SyncKeyPrefix = "sync." +) + +func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) { + + readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + syncKey := []byte(signaturePrefix + "____") + util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature)) + + resp, err := client.KvGet(context.Background(), &filer_pb.KvGetRequest{Key: syncKey}) + if err != nil { + return err + } + + if len(resp.Error) != 0 { + return errors.New(resp.Error) + } + if len(resp.Value) < 8 { + return nil + } + + lastOffsetTsNs = int64(util.BytesToUint64(resp.Value)) + + return nil + }) + + return + +} + +func setOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32, offsetTsNs int64) error { + return pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + syncKey := []byte(signaturePrefix + "____") + util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature)) + + valueBuf := make([]byte, 8) + util.Uint64toBytes(valueBuf, uint64(offsetTsNs)) + + resp, err := client.KvPut(context.Background(), &filer_pb.KvPutRequest{ + Key: syncKey, + Value: valueBuf, + }) + if err != nil { + return err + } + + if len(resp.Error) != 0 { + return errors.New(resp.Error) + } + + return nil + + }) + +} + +func genProcessFunction(sourcePath string, targetPath string, dataSink sink.ReplicationSink, debug bool) func(resp *filer_pb.SubscribeMetadataResponse) error { + // process function + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + + var sourceOldKey, sourceNewKey util.FullPath + if message.OldEntry != nil { + sourceOldKey = 
util.FullPath(resp.Directory).Child(message.OldEntry.Name) + } + if message.NewEntry != nil { + sourceNewKey = util.FullPath(message.NewParentPath).Child(message.NewEntry.Name) + } + + if debug { + glog.V(0).Infof("received %v", resp) + } + + if !strings.HasPrefix(resp.Directory, sourcePath) { + return nil + } + + // handle deletions + if message.OldEntry != nil && message.NewEntry == nil { + if !strings.HasPrefix(string(sourceOldKey), sourcePath) { + return nil + } + key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath) + return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) + } + + // handle new entries + if message.OldEntry == nil && message.NewEntry != nil { + if !strings.HasPrefix(string(sourceNewKey), sourcePath) { + return nil + } + key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) + return dataSink.CreateEntry(key, message.NewEntry, message.Signatures) + } + + // this is something special? + if message.OldEntry == nil && message.NewEntry == nil { + return nil + } + + // handle updates + if strings.HasPrefix(string(sourceOldKey), sourcePath) { + // old key is in the watched directory + if strings.HasPrefix(string(sourceNewKey), sourcePath) { + // new key is also in the watched directory + if !dataSink.IsIncremental() { + oldKey := util.Join(targetPath, string(sourceOldKey)[len(sourcePath):]) + message.NewParentPath = util.Join(targetPath, message.NewParentPath[len(sourcePath):]) + foundExisting, err := dataSink.UpdateEntry(string(oldKey), message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures) + if foundExisting { + return err + } + + // not able to find old entry + if err = dataSink.DeleteEntry(string(oldKey), message.OldEntry.IsDirectory, false, message.Signatures); err != nil { + return fmt.Errorf("delete old entry %v: %v", oldKey, err) + } + } + // create the new entry + newKey := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) + return dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures) + + } else { + // new key is outside of the watched directory + if !dataSink.IsIncremental() { + key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath) + return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) + } + } + } else { + // old key is outside of the watched directory + if strings.HasPrefix(string(sourceNewKey), sourcePath) { + // new key is in the watched directory + key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) + return dataSink.CreateEntry(key, message.NewEntry, message.Signatures) + } else { + // new key is also outside of the watched directory + // skip + } + } + + return nil + } + return processEventFn +} + +func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) string { + if !dataSink.IsIncremental() { + return util.Join(targetPath, string(sourceKey)[len(sourcePath):]) + } + var mTime int64 + if message.NewEntry != nil { + mTime = message.NewEntry.Attributes.Mtime + } else if message.OldEntry != nil { + mTime = message.OldEntry.Attributes.Mtime + } + dateKey := time.Unix(mTime, 0).Format("2006-01-02") + return util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):]) +} diff --git a/weed/command/fix.go b/weed/command/fix.go index 76bc19f7e..ae9a051b8 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -11,6 
+11,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -46,8 +47,8 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { } func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { - glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) - if n.Size > 0 && n.Size != types.TombstoneFileSize { + glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) + if n.Size.IsValid() { pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { @@ -67,23 +68,23 @@ func runFix(cmd *Command, args []string) bool { if *fixVolumeCollection != "" { baseFileName = *fixVolumeCollection + "_" + baseFileName } - indexFileName := path.Join(*fixVolumePath, baseFileName+".idx") - indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() + indexFileName := path.Join(util.ResolvePath(*fixVolumePath), baseFileName+".idx") nm := needle_map.NewMemDb() + defer nm.Close() vid := needle.VolumeId(*fixVolumeId) scanner := &VolumeFileScanner4Fix{ nm: nm, } - err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner) - if err != nil { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + if err := storage.ScanVolumeFile(util.ResolvePath(*fixVolumePath), *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) + } + + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: %v", err) os.Remove(indexFileName) } diff --git a/weed/command/gateway.go b/weed/command/gateway.go new file mode 100644 index 000000000..8a6f852a5 --- /dev/null +++ b/weed/command/gateway.go @@ -0,0 +1,93 @@ +package command + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + gatewayOptions GatewayOptions +) + +type GatewayOptions struct { + masters *string + filers *string + bindIp *string + port *int + maxMB *int +} + +func init() { + cmdGateway.Run = runGateway // break init cycle + gatewayOptions.masters = cmdGateway.Flag.String("master", "localhost:9333", "comma-separated master servers") + gatewayOptions.filers = cmdGateway.Flag.String("filer", "localhost:8888", "comma-separated filer servers") + gatewayOptions.bindIp = cmdGateway.Flag.String("ip.bind", "localhost", "ip address to bind to") + gatewayOptions.port = cmdGateway.Flag.Int("port", 5647, "gateway http listen port") + gatewayOptions.maxMB = cmdGateway.Flag.Int("maxMB", 4, "split files larger than the limit") +} + +var cmdGateway = &Command{ + UsageLine: "gateway -port=8888 -master=<ip:port>[,<ip:port>]* -filer=<ip:port>[,<ip:port>]*", + Short: "start a gateway server that points to a list of master servers or a list of filers", + Long: `start a gateway server which accepts REST operation to write any blobs, files, or topic messages. 
+
+	POST /blobs/
+		upload the blob and return a chunk id
+	DELETE /blobs/<chunk_id>
+		delete a chunk id
+
+	/*
+	POST /files/path/to/a/file
+		save /path/to/a/file on filer
+	DELETE /files/path/to/a/file
+		delete /path/to/a/file on filer
+
+	POST /topics/topicName
+		save on filer to /topics/topicName/<ds>/ts.json
+	*/
+`,
+}
+
+func runGateway(cmd *Command, args []string) bool {
+
+	util.LoadConfiguration("security", false)
+
+	gatewayOptions.startGateway()
+
+	return true
+}
+
+func (gw *GatewayOptions) startGateway() {
+
+	defaultMux := http.NewServeMux()
+
+	_, gws_err := weed_server.NewGatewayServer(defaultMux, &weed_server.GatewayOption{
+		Masters: strings.Split(*gw.masters, ","),
+		Filers:  strings.Split(*gw.filers, ","),
+		MaxMB:   *gw.maxMB,
+	})
+	if gws_err != nil {
+		glog.Fatalf("Gateway startup error: %v", gws_err)
+	}
+
+	glog.V(0).Infof("Start Seaweed Gateway %s at %s:%d", util.Version(), *gw.bindIp, *gw.port)
+	gatewayListener, e := util.NewListener(
+		*gw.bindIp+":"+strconv.Itoa(*gw.port),
+		time.Duration(10)*time.Second,
+	)
+	if e != nil {
+		glog.Fatalf("Filer listener error: %v", e)
+	}
+
+	httpS := &http.Server{Handler: defaultMux}
+	if err := httpS.Serve(gatewayListener); err != nil {
+		// report the serve error itself, not the earlier (nil) listener error
+		glog.Fatalf("Gateway failed to serve: %v", err)
+	}
+
+}
diff --git a/weed/command/iam.go b/weed/command/iam.go
new file mode 100644
index 000000000..17d0832cb
--- /dev/null
+++ b/weed/command/iam.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/iamapi"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/gorilla/mux"
+	"time"
+)
+
+var (
+	iamStandaloneOptions IamOptions
+)
+
+type IamOptions struct {
+	filer   *string
+	masters *string
+	port    *int
+}
+
+func init() {
+	cmdIam.Run = runIam // break init cycle
+	iamStandaloneOptions.filer = cmdIam.Flag.String("filer", "localhost:8888", "filer server address")
+	iamStandaloneOptions.masters = cmdIam.Flag.String("master", "localhost:9333", "comma-separated master servers")
+	iamStandaloneOptions.port = cmdIam.Flag.Int("port", 8111, "iam server http listen port")
+}
+
+var cmdIam = &Command{
+	UsageLine: "iam [-port=8111] [-filer=<ip:port>] [-masters=<ip:port>,<ip:port>]",
+	Short:     "start an IAM API compatible server",
+	Long:      "start an IAM API compatible server.",
+}
+
+func runIam(cmd *Command, args []string) bool {
+	return iamStandaloneOptions.startIamServer()
+}
+
+func (iamopt *IamOptions) startIamServer() bool {
+	filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
+	if err != nil {
+		glog.Fatal(err)
+		return false
+	}
+
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+	for {
+		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+			if err != nil {
+				return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+			}
+			glog.V(0).Infof("IAM read filer configuration: %s", resp)
+			return nil
+		})
+		if err != nil {
+			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+			time.Sleep(time.Second)
+		} else {
+			glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+			break
+		}
+	}
+
+	router :=
mux.NewRouter().SkipClean(true) + _, iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{ + Filer: *iamopt.filer, + Port: *iamopt.port, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + }) + glog.V(0).Info("NewIamApiServer created") + if iamApiServer_err != nil { + glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err) + } + + httpS := &http.Server{Handler: router} + + listenAddress := fmt.Sprintf(":%d", *iamopt.port) + iamApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) + if err != nil { + glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err) + } + + glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port) + if err = httpS.Serve(iamApiListener); err != nil { + glog.Fatalf("IAM API Server Fail to serve: %v", err) + } + + return true +} diff --git a/weed/command/master.go b/weed/command/master.go index 8d0a3289c..0f5e2156d 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -1,22 +1,25 @@ package command import ( + "github.com/chrislusf/raft/protobuf" + "github.com/gorilla/mux" + "google.golang.org/grpc/reflection" "net/http" "os" - "runtime" + "sort" "strconv" "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/util/grace" - "github.com/chrislusf/raft/protobuf" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc/reflection" ) var ( @@ -24,38 +27,40 @@ var ( ) type MasterOptions struct { - port *int - ip *string - ipBind *string - metaFolder *string - peers *string - volumeSizeLimitMB *uint - volumePreallocate *bool - pulseSeconds *int + port *int + ip *string + ipBind *string + metaFolder *string + peers *string + volumeSizeLimitMB *uint + volumePreallocate *bool + // pulseSeconds *int defaultReplication *string garbageThreshold *float64 whiteList *string disableHttp *bool metricsAddress *string metricsIntervalSec *int + raftResumeState *bool } func init() { cmdMaster.Run = runMaster // break init cycle m.port = cmdMaster.Flag.Int("port", 9333, "http listen port") - m.ip = cmdMaster.Flag.String("ip", "localhost", "master <ip>|<server> address") - m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master <ip>|<server> address, also used as identifier") + m.ipBind = cmdMaster.Flag.String("ip.bind", "", "ip address to bind to") m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") - m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094") + m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095") m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") - m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + // m.pulseSeconds = 
cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") - m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address") + m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address <host>:<port>") m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + m.raftResumeState = cmdMaster.Flag.Bool("resumeState", false, "resume previous state on start master server") } var cmdMaster = &Command{ @@ -63,7 +68,7 @@ var cmdMaster = &Command{ Short: "start a master server", Long: `start a master server to provide volume=>location mapping service and sequence number of file ids - The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. + The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order. The example security.toml configuration file can be generated by "weed scaffold -config=security" @@ -80,10 +85,13 @@ func runMaster(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - runtime.GOMAXPROCS(runtime.NumCPU()) - util.SetupProfiling(*masterCpuProfile, *masterMemProfile) + grace.SetupProfiling(*masterCpuProfile, *masterMemProfile) - if err := util.TestFolderWritable(*m.metaFolder); err != nil { + parent, _ := util.FullPath(*m.metaFolder).DirAndName() + if util.FileExists(string(parent)) && !util.FileExists(*m.metaFolder) { + os.MkdirAll(*m.metaFolder, 0755) + } + if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil { glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) } @@ -102,23 +110,23 @@ func runMaster(cmd *Command, args []string) bool { func startMaster(masterOption MasterOptions, masterWhiteList []string) { - backend.LoadConfiguration(viper.GetViper()) + backend.LoadConfiguration(util.GetViper()) myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers) r := mux.NewRouter() ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers) listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port) - glog.V(0).Infof("Start Seaweed Master %s at %s", util.VERSION, listeningAddress) + glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) masterListener, e := util.NewListener(listeningAddress, 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } // start raftServer - raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), - peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, *masterOption.pulseSeconds) + raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"), + peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, *masterOption.raftResumeState) if raftServer == nil { - glog.Fatalf("please verify %s is 
writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder) + glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err) } ms.SetRaftServer(raftServer) r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") @@ -128,14 +136,22 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } - // Create your protocol servers. - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterOption.ipBind, grpcPort) + glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort) go grpcS.Serve(grpcL) + go func() { + time.Sleep(1500 * time.Millisecond) + if ms.Topo.RaftServer.Leader() == "" && ms.Topo.RaftServer.IsLogEmpty() && isTheFirstOne(myMasterAddress, peers) { + if ms.MasterClient.FindLeaderFromOtherPeers(myMasterAddress) == "" { + raftServer.DoJoinCommand() + } + } + }() + go ms.MasterClient.KeepConnectedToMaster() // start http server @@ -146,6 +162,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { } func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) { + glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers) masterAddress = masterIp + ":" + strconv.Itoa(masterPort) if peers != "" { cleanedPeers = strings.Split(peers, ",") @@ -168,13 +185,22 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st return } +func isTheFirstOne(self string, peers []string) bool { + sort.Strings(peers) + if len(peers) <= 0 { + return true + } + return self == peers[0] +} + func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption { return &weed_server.MasterOption{ - Port: *m.port, - MetaFolder: *m.metaFolder, - VolumeSizeLimitMB: *m.volumeSizeLimitMB, - VolumePreallocate: *m.volumePreallocate, - PulseSeconds: *m.pulseSeconds, + Host: *m.ip, + Port: *m.port, + MetaFolder: *m.metaFolder, + VolumeSizeLimitMB: *m.volumeSizeLimitMB, + VolumePreallocate: *m.volumePreallocate, + // PulseSeconds: *m.pulseSeconds, DefaultReplicaPlacement: *m.defaultReplication, GarbageThreshold: *m.garbageThreshold, WhiteList: whiteList, diff --git a/weed/command/mount.go b/weed/command/mount.go index f09b285f7..5811f0b99 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -1,29 +1,38 @@ package command import ( - "fmt" - "strconv" - "strings" + "os" + "time" ) type MountOptions struct { filer *string filerMountRootPath *string dir *string - dirListCacheLimit *int64 + dirAutoCreate *bool collection *string replication *string + diskType *string ttlSec *int chunkSizeLimitMB *int + concurrentWriters *int + cacheDir *string + cacheSizeMB *int64 dataCenter *string allowOthers *bool umaskString *string + nonempty *bool + volumeServerAccess *string + uidMap *string + gidMap *string + readOnly *bool } var ( - mountOptions MountOptions - mountCpuProfile *string - mountMemProfile *string + mountOptions MountOptions + mountCpuProfile *string + mountMemProfile *string + mountReadRetryTime 
*time.Duration ) func init() { @@ -31,16 +40,27 @@ func init() { mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location") mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server") mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory") - mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing") + mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to") mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files") mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.") + mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") - mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files") + mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files") + mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers if not 0") + mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data") + mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111") + mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory") + mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]") + mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated <local_uid>:<filer_uid>") + mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated <local_gid>:<filer_gid>") + mountOptions.readOnly = cmdMount.Flag.Bool("readOnly", false, "read only") + mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") + mountReadRetryTime = cmdMount.Flag.Duration("readRetryTime", 6*time.Second, "maximum read retry wait time") } var cmdMount = &Command{ @@ -60,19 +80,3 @@ var cmdMount = &Command{ `, } - -func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { - hostnameAndPort := strings.Split(filer, ":") - if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) - } - - filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) - if parseErr != nil { - return "", fmt.Errorf("filer port parse error: %v", parseErr) - } - - filerGrpcPort := int(filerPort) + 10000 - - return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil -} diff --git 
a/weed/command/mount_linux.go b/weed/command/mount_linux.go index 80a5f9da4..25c4f72cf 100644 --- a/weed/command/mount_linux.go +++ b/weed/command/mount_linux.go @@ -138,9 +138,7 @@ func parseInfoFile(r io.Reader) ([]*Info, error) { } func osSpecificMountOptions() []fuse.MountOption { - return []fuse.MountOption{ - fuse.AllowNonEmptyMount(), - } + return []fuse.MountOption{} } func checkMountPointAvailable(dir string) bool { diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 453531d00..2474cf7dd 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -3,7 +3,9 @@ package command import ( + "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "os" "os/user" "path" @@ -12,20 +14,27 @@ import ( "strings" "time" - "github.com/jacobsa/daemonize" - "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" + "github.com/chrislusf/seaweedfs/weed/util/grace" ) func runMount(cmd *Command, args []string) bool { - util.SetupProfiling(*mountCpuProfile, *mountMemProfile) + grace.SetupProfiling(*mountCpuProfile, *mountMemProfile) + if *mountReadRetryTime < time.Second { + *mountReadRetryTime = time.Second + } + util.RetryWaitTime = *mountReadRetryTime umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64) if umaskErr != nil { @@ -33,27 +42,52 @@ func runMount(cmd *Command, args []string) bool { return false } - return RunMount( - *mountOptions.filer, - *mountOptions.filerMountRootPath, - *mountOptions.dir, - *mountOptions.collection, - *mountOptions.replication, - *mountOptions.dataCenter, - *mountOptions.chunkSizeLimitMB, - *mountOptions.allowOthers, - *mountOptions.ttlSec, - *mountOptions.dirListCacheLimit, - os.FileMode(umask), - ) + if len(args) > 0 { + return false + } + + return RunMount(&mountOptions, os.FileMode(umask)) } -func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, - allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool { +func RunMount(option *MountOptions, umask os.FileMode) bool { + + filer := *option.filer + // parse filer grpc address + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(filer) + if err != nil { + glog.V(0).Infof("ParseFilerGrpcAddress: %v", err) + return true + } util.LoadConfiguration("security", false) + // try to connect to filer, filerBucketsPath may be useful later + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + var cipher bool + for i := 0; i < 10; i++ { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("failed to talk to filer %s: %v", filerGrpcAddress, err) + glog.V(0).Infof("wait for %d seconds ...", i+1) + time.Sleep(time.Duration(i+1) * time.Second) + } + } + if err != nil { + glog.Errorf("failed 
to talk to filer %s: %v", filerGrpcAddress, err) + return true + } + + filerMountRootPath := *option.filerMountRootPath + dir := util.ResolvePath(*option.dir) + chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB - fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) if dir == "" { fmt.Printf("Please specify the mount directory via \"-dir\"") return false @@ -65,15 +99,21 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.Unmount(dir) - uid, gid := uint32(0), uint32(0) - // detect mount folder mode - mountMode := os.ModeDir | 0755 + if *option.dirAutoCreate { + os.MkdirAll(dir, os.FileMode(0777)&^umask) + } fileInfo, err := os.Stat(dir) + + uid, gid := uint32(0), uint32(0) + mountMode := os.ModeDir | 0777 if err == nil { - mountMode = os.ModeDir | fileInfo.Mode() + mountMode = os.ModeDir | os.FileMode(0777)&^umask uid, gid = util.GetFileUidGid(fileInfo) - fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode()) + fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, mountMode) + } else { + fmt.Printf("can not stat %s\n", dir) + return false } if uid == 0 { @@ -88,10 +128,17 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente } } + // mapping uid, gid + uidGidMapper, err := meta_cache.NewUidGidMapper(*option.uidMap, *option.gidMap) + if err != nil { + fmt.Printf("failed to parse %s %s: %v\n", *option.uidMap, *option.gidMap, err) + return false + } + // Ensure target mount point availability if isValid := checkMountPointAvailable(dir); !isValid { glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir) - return false + return true } mountName := path.Base(dir) @@ -100,10 +147,8 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.VolumeName(mountName), fuse.FSName(filer + ":" + filerMountRootPath), fuse.Subtype("seaweedfs"), - fuse.NoAppleDouble(), + // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders fuse.NoAppleXattr(), - fuse.NoBrowse(), - fuse.AutoXattr(), fuse.ExclCreate(), fuse.DaemonTimeout("3600"), fuse.AllowSUID(), @@ -111,68 +156,77 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.MaxReadahead(1024 * 128), fuse.AsyncRead(), fuse.WritebackCache(), - fuse.AllowNonEmptyMount(), + fuse.MaxBackground(128), + fuse.CongestionThreshold(128), } options = append(options, osSpecificMountOptions()...) - - if allowOthers { + if *option.allowOthers { options = append(options, fuse.AllowOther()) } - - c, err := fuse.Mount(dir, options...) 
- if err != nil { - glog.V(0).Infof("mount: %v", err) - daemonize.SignalOutcome(err) - return true + if *option.nonempty { + options = append(options, fuse.AllowNonEmptyMount()) } - - util.OnInterrupt(func() { - fuse.Unmount(dir) - c.Close() - }) - - filerGrpcAddress, err := parseFilerGrpcAddress(filer) - if err != nil { - glog.V(0).Infof("parseFilerGrpcAddress: %v", err) - daemonize.SignalOutcome(err) - return true + if *option.readOnly { + options = append(options, fuse.ReadOnly()) } + // find mount point mountRoot := filerMountRootPath if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { mountRoot = mountRoot[0 : len(mountRoot)-1] } - daemonize.SignalOutcome(nil) + diskType := types.ToDiskType(*option.diskType) - err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ + seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{ + MountDirectory: dir, + FilerAddress: filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: grpcDialOption, FilerMountRootPath: mountRoot, - Collection: collection, - Replication: replication, - TtlSec: int32(ttlSec), + Collection: *option.collection, + Replication: *option.replication, + TtlSec: int32(*option.ttlSec), + DiskType: diskType, ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, - DataCenter: dataCenter, - DirListCacheLimit: dirListCacheLimit, - EntryCacheTtl: 3 * time.Second, + ConcurrentWriters: *option.concurrentWriters, + CacheDir: *option.cacheDir, + CacheSizeMB: *option.cacheSizeMB, + DataCenter: *option.dataCenter, MountUid: uid, MountGid: gid, MountMode: mountMode, MountCtime: fileInfo.ModTime(), MountMtime: time.Now(), Umask: umask, - })) + VolumeServerAccess: *mountOptions.volumeServerAccess, + Cipher: cipher, + UidGidMapper: uidGidMapper, + }) + + // mount + c, err := fuse.Mount(dir, options...) 
if err != nil { - fuse.Unmount(dir) + glog.V(0).Infof("mount: %v", err) + return true } + defer fuse.Unmount(dir) + + grace.OnInterrupt(func() { + fuse.Unmount(dir) + c.Close() + }) + + glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir) + server := fs.New(c, nil) + seaweedFileSystem.Server = server + err = server.Serve(seaweedFileSystem) // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { glog.V(0).Infof("mount process: %v", err) - daemonize.SignalOutcome(err) return true } diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go new file mode 100644 index 000000000..db0b4148d --- /dev/null +++ b/weed/command/msg_broker.go @@ -0,0 +1,114 @@ +package command + +import ( + "context" + "fmt" + "strconv" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + messageBrokerStandaloneOptions MessageBrokerOptions +) + +type MessageBrokerOptions struct { + filer *string + ip *string + port *int + cpuprofile *string + memprofile *string +} + +func init() { + cmdMsgBroker.Run = runMsgBroker // break init cycle + messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") + messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address") + messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port") + messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file") + messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file") +} + +var cmdMsgBroker = &Command{ + UsageLine: "msgBroker [-port=17777] [-filer=<ip:port>]", + Short: "start a message queue broker", + Long: `start a message queue broker + + The broker can accept gRPC calls to write or read messages. The messages are stored via filer. + The brokers are stateless. To scale up, just add more brokers. 
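Because the brokers are stateless and all message state lives in the filer, scaling out is simply a matter of starting additional msgBroker processes against the same filer. A usage sketch built from the -filer and -port flags defined in this file (the second port is illustrative):

    weed msgBroker -filer=localhost:8888 -port=17777
    weed msgBroker -filer=localhost:8888 -port=17778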
+ +`, +} + +func runMsgBroker(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return messageBrokerStandaloneOptions.startQueueServer() + +} + +func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool { + + grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile) + + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*msgBrokerOpt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker") + cipher := false + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + break + } + } + + qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{ + Filers: []string{*msgBrokerOpt.filer}, + DefaultReplication: "", + MaxMB: 0, + Ip: *msgBrokerOpt.ip, + Port: *msgBrokerOpt.port, + Cipher: cipher, + }, grpcDialOption) + + // start grpc listener + grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err) + } + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) + messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs) + reflection.Register(grpcS) + grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/s3.go b/weed/command/s3.go index e004bb066..c8292a7d5 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,18 +1,21 @@ package command import ( + "context" + "fmt" "net/http" "time" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "fmt" + "github.com/gorilla/mux" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" ) var ( @@ -21,28 +24,104 @@ var ( type S3Options struct { filer *string - filerBucketsPath *string port *int + config *string domainName *string tlsPrivateKey *string tlsCertificate *string + metricsHttpPort *int + allowEmptyFolder *bool } func init() { cmdS3.Run = runS3 // break init cycle s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") - s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") - s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") + s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") s3StandaloneOptions.tlsPrivateKey = 
cmdS3.Flag.String("key.file", "", "path to the TLS private key file") s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") + s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders") } var cmdS3 = &Command{ - UsageLine: "s3 -port=8333 -filer=<ip:port>", + UsageLine: "s3 [-port=8333] [-filer=<ip:port>] [-config=</path/to/config.json>]", Short: "start a s3 API compatible server that is backed by a filer", Long: `start a s3 API compatible server that is backed by a filer. + By default, you can use any access key and secret key to access the S3 APIs. + To enable credential based access, create a config.json file similar to this: + +{ + "identities": [ + { + "name": "anonymous", + "actions": [ + "Read" + ] + }, + { + "name": "some_admin_user", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key1" + } + ], + "actions": [ + "Admin", + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "some_read_only_user", + "credentials": [ + { + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Read" + ] + }, + { + "name": "some_normal_user", + "credentials": [ + { + "accessKey": "some_access_key3", + "secretKey": "some_secret_key3" + } + ], + "actions": [ + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "user_limited_to_bucket1", + "credentials": [ + { + "accessKey": "some_access_key4", + "secretKey": "some_secret_key4" + } + ], + "actions": [ + "Read:bucket1", + "List:bucket1", + "Tagging:bucket1", + "Write:bucket1" + ] + } + ] +} + `, } @@ -50,26 +129,61 @@ func runS3(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) + go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpPort) + return s3StandaloneOptions.startS3Server() } func (s3opt *S3Options) startS3Server() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer) + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false } + filerBucketsPath := "/buckets" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + // metrics read from the filer + var metricsAddress string + var metricsIntervalSec int + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec) + glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + break + } + } + + go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec) + router := mux.NewRouter().SkipClean(true) _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ Filer: *s3opt.filer, + Port: *s3opt.port, FilerGrpcAddress: filerGrpcAddress, 
+ Config: *s3opt.config, DomainName: *s3opt.domainName, - BucketsPath: *s3opt.filerBucketsPath, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + BucketsPath: filerBucketsPath, + GrpcDialOption: grpcDialOption, + AllowEmptyFolder: *s3opt.allowEmptyFolder, }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) @@ -84,12 +198,12 @@ func (s3opt *S3Options) startS3Server() bool { } if *s3opt.tlsPrivateKey != "" { - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port) + glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port) if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port) + glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port) if err = httpS.Serve(s3ApiListener); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index a76466ed6..88dc94df1 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -14,6 +14,14 @@ var cmdScaffold = &Command{ Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. + The options can also be overwritten by environment variables. + For example, the filer.toml mysql password can be overwritten by environment variable + export WEED_MYSQL_PASSWORD=some_password + Environment variable rules: + * Prefix the variable name with "WEED_" + * Uppercase the rest of the variable name. + * Replace '.' with '_' + `, } @@ -36,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool { content = SECURITY_TOML_EXAMPLE case "master": content = MASTER_TOML_EXAMPLE + case "shell": + content = SHELL_TOML_EXAMPLE } if content == "" { println("need a valid -config option") @@ -59,21 +69,43 @@ const ( # $HOME/.seaweedfs/filer.toml # /etc/seaweedfs/filer.toml +#################################################### +# Customizable filer server options +#################################################### +[filer.options] +# with http DELETE, by default the filer would check whether a folder is empty. +# recursive_delete will delete all sub folders and files, similar to "rm -Rf" +recursive_delete = false +# each directory under this folder automatically becomes a separate bucket +buckets_folder = "/buckets" + +#################################################### +# The following are filer store options +#################################################### + [leveldb2] # local on disk, mostly for simple single-machine setup, fairly scalable # faster than previous leveldb, recommended. enabled = true -dir = "." # directory to store level db files +dir = "./filerldb2" # directory to store level db files -#################################################### -# multiple filers on shared storage, fairly scalable -#################################################### +[leveldb3] +# similar to leveldb2. +# each bucket has its own meta store.
+enabled = false +dir = "./filerldb3" # directory to store level db files -[mysql] # or tidb +[rocksdb] +# local on disk, similar to leveldb +# since it is using a C wrapper, you need to install rocksdb and build it yourself +enabled = false +dir = "./filerrdb" # directory to store rocksdb files + +[mysql] # or memsql, tidb # CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) COMMENT 'directory or file name', -# directory TEXT COMMENT 'full path to parent directory', +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(1000) BINARY COMMENT 'directory or file name', +# directory TEXT COMMENT 'full path to parent directory', # meta LONGBLOB, # PRIMARY KEY (dirhash, name) # ) DEFAULT CHARSET=utf8; @@ -86,9 +118,37 @@ password = "" database = "" # create or use an existing database connection_max_idle = 2 connection_max_open = 100 +connection_max_lifetime_seconds = 0 +interpolateParams = false +# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS: +enableUpsert = true +upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" + +[mysql2] # or memsql, tidb +enabled = false +createTable = """ + CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` ( + dirhash BIGINT, + name VARCHAR(1000) BINARY, + directory TEXT, + meta LONGBLOB, + PRIMARY KEY (dirhash, name) + ) DEFAULT CHARSET=utf8; +""" +hostname = "localhost" +port = 3306 +username = "root" +password = "" +database = "" # create or use an existing database +connection_max_idle = 2 +connection_max_open = 100 +connection_max_lifetime_seconds = 0 interpolateParams = false +# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS: +enableUpsert = true +upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) 
ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" -[postgres] # or cockroachdb +[postgres] # or cockroachdb, YugabyteDB # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT, # name VARCHAR(65535), @@ -101,10 +161,40 @@ hostname = "localhost" port = 5432 username = "postgres" password = "" -database = "" # create or use an existing database +database = "postgres" # create or use an existing database +schema = "" +sslmode = "disable" +connection_max_idle = 100 +connection_max_open = 100 +connection_max_lifetime_seconds = 0 +# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS: +enableUpsert = true +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" + +[postgres2] +enabled = false +createTable = """ + CREATE TABLE IF NOT EXISTS "%s" ( + dirhash BIGINT, + name VARCHAR(65535), + directory VARCHAR(65535), + meta bytea, + PRIMARY KEY (dirhash, name) + ); +""" +hostname = "localhost" +port = 5432 +username = "postgres" +password = "" +database = "postgres" # create or use an existing database +schema = "" sslmode = "disable" connection_max_idle = 100 connection_max_open = 100 +connection_max_lifetime_seconds = 0 +# if insert/upsert is failing, you can disable upsert or update the query syntax to match your RDBMS: +enableUpsert = true +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" [cassandra] # CREATE TABLE filemeta ( @@ -118,14 +208,25 @@ keyspace="seaweedfs" hosts=[ "localhost:9042", ] +username="" +password="" +# This changes the data layout. Only add new directories. Removing/Updating will cause data loss. +superLargeDirectories = [] + +[hbase] +enabled = false +zkquorum = "" +table = "seaweedfs" -[redis] +[redis2] enabled = false address = "localhost:6379" password = "" database = 0 +# This changes the data layout. Only add new directories. Removing/Updating will cause data loss. +superLargeDirectories = [] -[redis_cluster] +[redis_cluster2] enabled = false addresses = [ "localhost:30001", @@ -136,20 +237,58 @@ addresses = [ "localhost:30006", ] password = "" -// allows reads from slave servers or the master, but all writes still go to the master -readOnly = true -// automatically use the closest Redis server for reads -routeByLatency = true +# allows reads from slave servers or the master, but all writes still go to the master +readOnly = false +# automatically use the closest Redis server for reads +routeByLatency = false +# This changes the data layout. Only add new directories. Removing/Updating will cause data loss. +superLargeDirectories = [] [etcd] enabled = false servers = "localhost:2379" timeout = "3s" -[tikv] +[mongodb] enabled = false -pdAddress = "192.168.199.113:2379" +uri = "mongodb://localhost:27017" +option_pool_size = 0 +database = "seaweedfs" +[elastic7] +enabled = false +servers = [ + "http://localhost1:9200", + "http://localhost2:9200", + "http://localhost3:9200", ] +username = "" +password = "" +sniff_enabled = false +healthcheck_enabled = false +# increasing the value is recommended; be sure the value in Elastic is greater or equal to this +index.max_result_window = 10000 + + + +########################## +########################## +# To add path-specific filer store: +# +# 1. Add a name following the store type separated by a dot ".". 
E.g., cassandra.tmp +# 2. Add a location configuration. E.g., location = "/tmp/" +# 3. Copy and customize all other configurations. +# Make sure they are not the same if using the same store type! +# 4. Set enabled to true +# +# The following is just using redis as an example +########################## +[redis2.tmp] +enabled = false +location = "/tmp/" +address = "localhost:6379" +password = "" +database = 1 ` @@ -204,7 +343,8 @@ enabled = false # This URL will Dial the RabbitMQ server at the URL in the environment # variable RABBIT_SERVER_URL and open the exchange "myexchange". # The exchange must have already been created by some other means, like -# the RabbitMQ management plugin. +# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then +# create a binding myexchange => myqueue topic_url = "rabbit://myexchange" sub_url = "rabbit://myqueue" ` @@ -225,6 +365,19 @@ grpcAddress = "localhost:18888" # i.e., all files with this "prefix" are sent to notification message queue. directory = "/buckets" +[sink.local] +enabled = false +directory = "/data" +# all replicated files are under modified time as yyyy-mm-dd directories +# so each date directory contains all new and updated files. +is_incremental = false + +[sink.local_incremental] +# all replicated files are under modified time as yyyy-mm-dd directories +# so each date directory contains all new and updated files. +enabled = false +directory = "/backup" + [sink.filer] enabled = false grpcAddress = "localhost:18888" @@ -235,6 +388,7 @@ directory = "/backup" replication = "" collection = "" ttlSec = 0 +is_incremental = false [sink.s3] # read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html @@ -245,6 +399,8 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials file region = "us-east-2" bucket = "your_bucket_name" # an existing bucket directory = "/" # destination directory +endpoint = "" +is_incremental = false [sink.google_cloud_storage] # read credentials doc at https://cloud.google.com/docs/authentication/getting-started @@ -252,6 +408,7 @@ enabled = false google_application_credentials = "/path/to/x.json" # path to json credential file bucket = "your_bucket_seaweedfs" # an existing bucket directory = "/" # destination directory +is_incremental = false [sink.azure] # experimental, let me know if it works @@ -260,6 +417,7 @@ account_name = "" account_key = "" container = "mycontainer" # an existing container directory = "/" # destination directory +is_incremental = false [sink.backblaze] enabled = false @@ -267,6 +425,7 @@ b2_account_id = "" b2_master_application_key = "" bucket = "mybucket" # an existing bucket directory = "/" # destination directory +is_incremental = false ` @@ -293,18 +452,28 @@ expires_after_seconds = 10 # seconds # the host name is not checked, so the PERM files can be shared.
[grpc] ca = "" +# Set a wildcard domain to enable TLS authentication by common names +allowed_wildcard_domain = "" # .mycompany.com [grpc.volume] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.master] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.filer] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names + +[grpc.msg_broker] +cert = "" +key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names # use this for any place that needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" @@ -312,7 +481,6 @@ key = "" cert = "" key = "" - # volume server https options # Note: work in progress! # this does not work with other clients, e.g., "weed filer|mount" etc, yet. @@ -335,23 +503,29 @@ key = "" [master.maintenance] # these scripts are run periodically, the same as running them from 'weed shell' scripts = """ + lock ec.encode -fullPercent=95 -quietFor=1h ec.rebuild -force ec.balance -force volume.balance -force + volume.fix.replication + unlock """ sleep_minutes = 17 # sleep minutes between each script execution [master.filer] -default_filer_url = "http://localhost:8888/" +default = "localhost:8888" # used by maintenance scripts if the scripts need to use fs-related commands + [master.sequencer] -type = "memory" # Choose [memory|etcd] type for storing the file id sequence +type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence # when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence # example : http://127.0.0.1:2379,http://127.0.0.1:2389 sequencer_etcd_urls = "http://127.0.0.1:2379" +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency [storage.backend] [storage.backend.s3.default] enabled = false @@ -359,6 +533,41 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). region = "us-east-2" bucket = "your_bucket_name" # an existing bucket + endpoint = "" + +# create this number of logical volumes if no more writable volumes +# count_x means how many copies of data. +# e.g.: +# 000 has only one copy, copy_1 +# 010 and 001 have two copies, copy_2 +# 011 has three copies, copy_3 +[master.volume_growth] +copy_1 = 7 # create 1 x 7 = 7 actual volumes +copy_2 = 6 # create 2 x 6 = 12 actual volumes +copy_3 = 3 # create 3 x 3 = 9 actual volumes +copy_other = 1 # create n x 1 = n actual volumes + +# configuration flags for replication +[master.replication] +# any replication counts should be considered minimums. If you specify 010 and +# have 3 different racks, that's still considered writable. Writes will still +# try to replicate to all available volumes. You should only use this option +# if you are doing your own replication or periodic sync of volumes. 
+treat_replication_as_minimums = false + +` + SHELL_TOML_EXAMPLE = ` + +[cluster] +default = "c1" + +[cluster.c1] +master = "localhost:9333" # comma-separated master servers +filer = "localhost:8888" # filer host and port + +[cluster.c2] +master = "" +filer = "" ` ) diff --git a/weed/command/server.go b/weed/command/server.go index 87f404ed3..6eb3bf97c 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -2,26 +2,30 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util/grace" "os" - "runtime" - "runtime/pprof" "strings" "time" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) type ServerOptions struct { cpuprofile *string + memprofile *string v VolumeServerOptions } var ( - serverOptions ServerOptions - masterOptions MasterOptions - filerOptions FilerOptions - s3Options S3Options + serverOptions ServerOptions + masterOptions MasterOptions + filerOptions FilerOptions + s3Options S3Options + webdavOptions WebDavOption + msgBrokerOptions MessageBrokerOptions ) func init() { @@ -29,7 +33,7 @@ func init() { } var cmdServer = &Command{ - UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name", + UsageLine: "server -dir=/tmp -volume.max=5 -ip=server_name", Short: "start a master server, a volume server, and optionally a filer and a S3 gateway", Long: `start both a volume server to provide storage spaces and a master server to provide volume=>location mapping service and sequence number of file ids @@ -45,24 +49,34 @@ var cmdServer = &Command{ } var ( - serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") - serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier") + serverBindIp = cmdServer.Flag.String("ip.bind", "", "ip address to bind to") serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...") - pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") - isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") - isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") + volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.") + volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). 
Low disk space will mark all volumes as ReadOnly.") + serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + + // pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server") + isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server") + isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") + isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") + isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway") + isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker") serverWhiteList []string + + False = false ) func init() { serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file") + serverOptions.memprofile = cmdServer.Flag.String("memprofile", "", "memory profile output file") masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") @@ -73,29 +87,52 @@ func init() { masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address") masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + masterOptions.raftResumeState = cmdServer.Flag.Bool("resumeState", false, "resume previous state on start master server") filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection") filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") - filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") - filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") + filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. 
If not specified, use master setting.") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") - filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") + filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") + filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") + filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list") + filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.") + filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") + serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") + serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") + serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") + serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") + serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. 
precludes --memprofile and --cpuprofile") + serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files") + serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, "<experimental> enable tcp port") - s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") - s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file") + s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders") + + webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port") + webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files") + webdavOptions.replication = cmdServer.Flag.String("webdav.replication", "", "replication to create the files") + webdavOptions.disk = cmdServer.Flag.String("webdav.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") + webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file") + webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file") + webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks") + webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB") + + msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port") } @@ -104,55 +141,54 @@ func runServer(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - if *serverOptions.cpuprofile != "" { - f, err := os.Create(*serverOptions.cpuprofile) - if err != nil { - glog.Fatal(err) - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } + grace.SetupProfiling(*serverOptions.cpuprofile, *serverOptions.memprofile) - if *filerOptions.redirectOnRead { + if *isStartingS3 { *isStartingFiler = true } - - if *isStartingS3 { + if *isStartingWebDav { + *isStartingFiler = true + } + if *isStartingMsgBroker { *isStartingFiler = true } - _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) - peers := strings.Join(peerList, ",") - masterOptions.peers = &peers + if *isStartingMasterServer { + _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) + peers := strings.Join(peerList, ",") + masterOptions.peers = &peers + } + // ip address masterOptions.ip = serverIp masterOptions.ipBind = serverBindIp - filerOptions.masters = &peers - filerOptions.ip = serverBindIp + filerOptions.masters = masterOptions.peers + filerOptions.ip = serverIp + filerOptions.bindIp = serverBindIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp - serverOptions.v.masters = &peers + serverOptions.v.masters = masterOptions.peers 
serverOptions.v.idleConnectionTimeout = serverTimeout serverOptions.v.dataCenter = serverDataCenter serverOptions.v.rack = serverRack + msgBrokerOptions.ip = serverIp - serverOptions.v.pulseSeconds = pulseSeconds - masterOptions.pulseSeconds = pulseSeconds + // serverOptions.v.pulseSeconds = pulseSeconds + // masterOptions.pulseSeconds = pulseSeconds masterOptions.whiteList = serverWhiteListOption filerOptions.dataCenter = serverDataCenter + filerOptions.rack = serverRack filerOptions.disableHttp = serverDisableHttp masterOptions.disableHttp = serverDisableHttp filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port) s3Options.filer = &filerAddress + webdavOptions.filer = &filerAddress + msgBrokerOptions.filer = &filerAddress - if *filerOptions.defaultReplicaPlacement == "" { - *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication - } - - runtime.GOMAXPROCS(runtime.NumCPU()) + go stats_collect.StartMetricsServer(*serverMetricsHttpPort) folders := strings.Split(*volumeDataFolders, ",") @@ -163,7 +199,7 @@ func runServer(cmd *Command, args []string) bool { if *masterOptions.metaFolder == "" { *masterOptions.metaFolder = folders[0] } - if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil { + if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil { glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err) } filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder @@ -190,12 +226,33 @@ func runServer(cmd *Command, args []string) bool { }() } + if *isStartingWebDav { + go func() { + time.Sleep(2 * time.Second) + + webdavOptions.startWebDav() + + }() + } + + if *isStartingMsgBroker { + go func() { + time.Sleep(2 * time.Second) + msgBrokerOptions.startQueueServer() + }() + } + // start volume server - { - go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption) + if *isStartingVolumeServer { + go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent) + + } + + if *isStartingMasterServer { + go startMaster(masterOptions, serverWhiteList) } - startMaster(masterOptions, serverWhiteList) + select {} return true } diff --git a/weed/command/shell.go b/weed/command/shell.go index 34b5aef31..c9976e809 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -6,18 +6,19 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/shell" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( - shellOptions shell.ShellOptions - shellInitialFilerUrl *string + shellOptions shell.ShellOptions + shellInitialFiler *string + shellCluster *string ) func init() { cmdShell.Run = runShell // break init cycle - shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers") - shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url") + shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333") + shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888") + shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml") } var cmdShell = &Command{ @@ -25,20 +26,40 @@ var cmdShell = &Command{ Short: "run interactive administrative commands", Long: `run interactive administrative commands. 
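The shell no longer assumes localhost endpoints: as runShell below shows, explicit -master/-filer flags take precedence; when both are left empty, shell.toml is loaded and the -cluster flag (or the cluster.default entry) selects the master and filer; with no configuration at all it falls back to localhost:9333 and localhost:8888. A usage sketch, assuming the c1 cluster from the shell.toml scaffold above:

    weed shell -cluster=c1
    weed shell -master=localhost:9333 -filer=localhost:8888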
+ Generate shell.toml via "weed scaffold -config=shell" + `, } func runShell(command *Command, args []string) bool { util.LoadConfiguration("security", false) - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + if *shellOptions.Masters == "" && *shellInitialFiler == "" { + util.LoadConfiguration("shell", false) + v := util.GetViper() + cluster := v.GetString("cluster.default") + if *shellCluster != "" { + cluster = *shellCluster + } + if cluster == "" { + *shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888" + } else { + *shellOptions.Masters = v.GetString("cluster." + cluster + ".master") + *shellInitialFiler = v.GetString("cluster." + cluster + ".filer") + } + } + + fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler) - var filerPwdErr error - shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl) - if filerPwdErr != nil { - fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr) + var err error + shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler) + if err != nil { + fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err) return false } + shellOptions.Directory = "/" shell.RunShell(shellOptions) diff --git a/weed/command/upload.go b/weed/command/upload.go index 25e938d9b..0f9361b40 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -1,16 +1,18 @@ package command import ( + "context" "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "google.golang.org/grpc" "os" "path/filepath" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - - "github.com/chrislusf/seaweedfs/weed/operation" ) var ( @@ -18,14 +20,16 @@ var ( ) type UploadOptions struct { - master *string - dir *string - include *string - replication *string - collection *string - dataCenter *string - ttl *string - maxMB *int + master *string + dir *string + include *string + replication *string + collection *string + dataCenter *string + ttl *string + diskType *string + maxMB *int + usePublicUrl *bool } func init() { @@ -37,8 +41,10 @@ func init() { upload.replication = cmdUpload.Flag.String("replication", "", "replication type") upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name") upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") + upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit") + upload.maxMB = cmdUpload.Flag.Int("maxMB", 4, "split files larger than the limit") + upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server") } var cmdUpload = &Command{ @@ -63,13 +69,22 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + 
defaultCollection, err := readMasterConfiguration(grpcDialOption, *upload.master) + if err != nil { + fmt.Printf("upload: %v", err) + return false + } + if *upload.replication == "" { + *upload.replication = defaultCollection + } if len(args) == 0 { if *upload.dir == "" { return false } - filepath.Walk(*upload.dir, func(path string, info os.FileInfo, err error) error { + filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error { if err == nil { if !info.IsDir() { if *upload.include != "" { @@ -81,9 +96,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB) + results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -100,11 +113,21 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { fmt.Println(e.Error()) } - results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB) + results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } return true } + +func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) { + err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error { + resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", masterAddress, err) + } + replication = resp.DefaultReplication + return nil + }) + return +} diff --git a/weed/command/version.go b/weed/command/version.go index 8fdd68ec8..9caf7dc4e 100644 --- a/weed/command/version.go +++ b/weed/command/version.go @@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool { cmd.Usage() } - fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) return true } diff --git a/weed/command/volume.go b/weed/command/volume.go index 3e8341ef8..9df500178 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -2,25 +2,32 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "net/http" + httppprof "net/http/pprof" "os" - "runtime" "runtime/pprof" "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util/httpdown" "github.com/spf13/viper" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/httpdown" + + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/server" + stats_collect 
"github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -28,45 +35,62 @@ var ( ) type VolumeServerOptions struct { - port *int - publicPort *int - folders []string - folderMaxLimits []int - ip *string - publicUrl *string - bindIp *string - masters *string - pulseSeconds *int - idleConnectionTimeout *int - dataCenter *string - rack *string - whiteList []string - indexType *string - fixJpgOrientation *bool - readRedirect *bool - cpuProfile *string - memProfile *string - compactionMBPerSecond *int + port *int + publicPort *int + folders []string + folderMaxLimits []int + idxFolder *string + ip *string + publicUrl *string + bindIp *string + masters *string + idleConnectionTimeout *int + dataCenter *string + rack *string + whiteList []string + indexType *string + diskType *string + fixJpgOrientation *bool + readRedirect *bool + cpuProfile *string + memProfile *string + compactionMBPerSecond *int + fileSizeLimitMB *int + concurrentUploadLimitMB *int + minFreeSpacePercents []float32 + pprof *bool + preStopSeconds *int + metricsHttpPort *int + // pulseSeconds *int + enableTcp *bool } func init() { cmdVolume.Run = runVolume // break init cycle v.port = cmdVolume.Flag.Int("port", 8080, "http listen port") v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public") - v.ip = cmdVolume.Flag.String("ip", "", "ip or server name") + v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier") v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address") - v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + v.bindIp = cmdVolume.Flag.String("ip.bind", "", "ip address to bind to") v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers") - v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") + v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") + // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") + v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") + v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") + 
v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size") + v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile") + v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files") + v.enableTcp = cmdVolume.Flag.Bool("tcp", false, "<experimental> enable tcp port") } var cmdVolume = &Command{ @@ -79,26 +103,39 @@ var cmdVolume = &Command{ var ( volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...") + maxVolumeCounts = cmdVolume.Flag.String("max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.") volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.") ) func runVolume(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - runtime.GOMAXPROCS(runtime.NumCPU()) - util.SetupProfiling(*v.cpuProfile, *v.memProfile) + // If --pprof is set we assume the caller wants to be able to collect + // cpu and memory profiles via go tool pprof + if !*v.pprof { + grace.SetupProfiling(*v.cpuProfile, *v.memProfile) + } + + go stats_collect.StartMetricsServer(*v.metricsHttpPort) - v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption) + v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent) return true } -func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) { +func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) { // Set multiple folders and each folder's max volume count limit v.folders = strings.Split(volumeFolders, ",") + for _, folder := range v.folders { + if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil { + glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) + } + } + + // set max maxCountStrings := strings.Split(maxVolumeCounts, ",") for _, maxString := range maxCountStrings { if max, e := strconv.Atoi(maxString); e == nil { @@ -107,14 +144,47 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v glog.Fatalf("The max specified in -max not a valid number %s", maxString) } } + if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + v.folderMaxLimits = append(v.folderMaxLimits, v.folderMaxLimits[0]) + } + } if len(v.folders) != len(v.folderMaxLimits) { glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits)) } - for _, folder := range v.folders { - if err := util.TestFolderWritable(folder); err != nil { - glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) + + // set minFreeSpacePercent + minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",") + for _, freeString := range minFreeSpacePercentStrings { + if value, e := strconv.ParseFloat(freeString, 32); e == nil { + v.minFreeSpacePercents = 
append(v.minFreeSpacePercents, float32(value)) + } else { + glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString) + } + } + if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0]) + } + } + if len(v.folders) != len(v.minFreeSpacePercents) { + glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents)) + } + + // set disk types + var diskTypes []types.DiskType + diskTypeStrings := strings.Split(*v.diskType, ",") + for _, diskTypeString := range diskTypeStrings { + diskTypes = append(diskTypes, types.ToDiskType(diskTypeString)) + } + if len(diskTypes) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + diskTypes = append(diskTypes, diskTypes[0]) } } + if len(v.folders) != len(diskTypes) { + glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes)) + } // security related white list configuration if volumeWhiteListOption != "" { @@ -122,7 +192,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } if *v.ip == "" { - *v.ip = "127.0.0.1" + *v.ip = util.DetectedHostAddress() + glog.V(0).Infof("detected volume server ip address: %v", *v.ip) } if *v.publicPort == 0 { @@ -138,6 +209,14 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v publicVolumeMux = http.NewServeMux() } + if *v.pprof { + volumeMux.HandleFunc("/debug/pprof/", httppprof.Index) + volumeMux.HandleFunc("/debug/pprof/cmdline", httppprof.Cmdline) + volumeMux.HandleFunc("/debug/pprof/profile", httppprof.Profile) + volumeMux.HandleFunc("/debug/pprof/symbol", httppprof.Symbol) + volumeMux.HandleFunc("/debug/pprof/trace", httppprof.Trace) + } + volumeNeedleMapKind := storage.NeedleMapInMemory switch *v.indexType { case "leveldb": @@ -152,14 +231,16 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux, *v.ip, *v.port, *v.publicUrl, - v.folders, v.folderMaxLimits, + v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes, + *v.idxFolder, volumeNeedleMapKind, - strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack, + strings.Split(masters, ","), 5, *v.dataCenter, *v.rack, v.whiteList, *v.fixJpgOrientation, *v.readRedirect, *v.compactionMBPerSecond, + *v.fileSizeLimitMB, + int64(*v.concurrentUploadLimitMB)*1024*1024, ) - // starting grpc server grpcS := v.startGrpcService(volumeServer) @@ -172,50 +253,56 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } + // starting tcp server + if *v.enableTcp { + go v.startTcpService(volumeServer) + } + // starting the cluster http server clusterHttpServer := v.startClusterHttpService(volumeMux) - stopChain := make(chan struct{}) - util.OnInterrupt(func() { + stopChan := make(chan bool) + grace.OnInterrupt(func() { fmt.Println("volume server has be killed") - var startTime time.Time - - // firstly, stop the public http service to prevent from receiving new user request - if nil != publicHttpDown { - startTime = time.Now() - if err := publicHttpDown.Stop(); err != nil { - glog.Warningf("stop the public http server failed, %v", err) - } - delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 - glog.V(0).Infof("stop public http server, elapsed %dms", delta) - } 
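// When --pprof is set, the block above mounts the standard net/http/pprof handlers
// on the volume server's own mux rather than http.DefaultServeMux. A standalone
// sketch of the same wiring, using only the standard library:
package main

import (
	"log"
	"net/http"
	httppprof "net/http/pprof"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof/", httppprof.Index)
	mux.HandleFunc("/debug/pprof/profile", httppprof.Profile)
	log.Fatal(http.ListenAndServe(":8080", mux))
}

// Profiles can then be pulled with, for example:
//   go tool pprof http://localhost:8080/debug/pprof/profile?seconds=10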
-	startTime = time.Now()
-	if err := clusterHttpServer.Stop(); err != nil {
-		glog.Warningf("stop the cluster http server failed, %v", err)
+	// Stop heartbeats
+	if !volumeServer.StopHeartbeat() {
+		glog.V(0).Infof("stopped sending heartbeats; waiting %d seconds before shutdown ...", *v.preStopSeconds)
+		time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
	}
-	delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
-	glog.V(0).Infof("graceful stop cluster http server, elapsed [%d]", delta)

-	startTime = time.Now()
-	grpcS.GracefulStop()
-	delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
-	glog.V(0).Infof("graceful stop gRPC, elapsed [%d]", delta)
+	shutdown(publicHttpDown, clusterHttpServer, grpcS, volumeServer)
+	stopChan <- true
+})

-	startTime = time.Now()
-	volumeServer.Shutdown()
-	delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
-	glog.V(0).Infof("stop volume server, elapsed [%d]", delta)
+	select {
+	case <-stopChan:
+	}

-	pprof.StopCPUProfile()
+}

-	close(stopChain) // notify exit
-})
+func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server, grpcS *grpc.Server, volumeServer *weed_server.VolumeServer) {

-	select {
-	case <-stopChain:
+	// first, stop the public http service so that no new user requests are accepted
+	if nil != publicHttpDown {
+		glog.V(0).Infof("stop public http server ... ")
+		if err := publicHttpDown.Stop(); err != nil {
+			glog.Warningf("stop the public http server failed, %v", err)
+		}
+	}
+
+	glog.V(0).Infof("graceful stop cluster http server ... ")
+	if err := clusterHttpServer.Stop(); err != nil {
+		glog.Warningf("stop the cluster http server failed, %v", err)
	}
-	glog.Warningf("the volume server exit.")
+
+	glog.V(0).Infof("graceful stop gRPC ...")
+	grpcS.GracefulStop()
+
+	volumeServer.Shutdown()
+
+	pprof.StopCPUProfile()
+
}

// check whether the public port is configured
@@ -229,7 +316,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
	if err != nil {
		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
	}
-	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume"))
+	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
	volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
	reflection.Register(grpcS)
	go func() {
@@ -242,7 +329,7 @@ func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
	publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
-	glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
+	glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
	publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
	if e != nil {
		glog.Fatalf("Volume server listener error: %v", e)
@@ -269,7 +356,7 @@
	}

	listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
-	glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
+	glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
	listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
	if e != nil {
		glog.Fatalf("Volume server listener error: %v", e)
@@ -288,3 +375,22 @@ func (v VolumeServerOptions)
startClusterHttpService(handler http.Handler) httpd }() return clusterHttpServer } + +func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) { + listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port+20000) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress) + listener, e := util.NewListener(listeningAddress, 0) + if e != nil { + glog.Fatalf("Volume server listener error on %s:%v", listeningAddress, e) + } + defer listener.Close() + + for { + c, err := listener.Accept() + if err != nil { + fmt.Println(err) + return + } + go volumeServer.HandleTcpConnection(c) + } +} diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 371c4a9ad..781ea1e36 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -1,17 +1,20 @@ package command import ( + "context" "fmt" "net/http" + "os" "os/user" "strconv" "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -22,8 +25,12 @@ type WebDavOption struct { filer *string port *int collection *string + replication *string + disk *string tlsPrivateKey *string tlsCertificate *string + cacheDir *string + cacheSizeMB *int64 } func init() { @@ -31,13 +38,17 @@ func init() { webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address") webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port") webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files") + webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files") + webDavStandaloneOptions.disk = cmdWebDav.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file") webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") + webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks") + webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB") } var cmdWebDav = &Command{ UsageLine: "webdav -port=7333 -filer=<ip:port>", - Short: "<unstable> start a webdav server that is backed by a filer", + Short: "start a webdav server that is backed by a filer", Long: `start a webdav server that is backed by a filer. 
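// startTcpService above listens on the HTTP port plus a fixed offset of 20000 and
// hands each accepted connection to its own goroutine. The wire format consumed by
// HandleTcpConnection is defined elsewhere in the tree, so this hypothetical
// client-side sketch only shows how the experimental port is derived:
func dialVolumeTcp(host string, httpPort int) (net.Conn, error) {
	return net.Dial("tcp", fmt.Sprintf("%s:%d", host, httpPort+20000))
}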
`, @@ -47,7 +58,7 @@ func runWebDav(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port) + glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port) return webDavStandaloneOptions.startWebDav() @@ -55,12 +66,6 @@ func runWebDav(cmd *Command, args []string) bool { func (wo *WebDavOption) startWebDav() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer) - if err != nil { - glog.Fatal(err) - return false - } - // detect current user uid, gid := uint32(0), uint32(0) if u, err := user.Current(); err == nil { @@ -72,13 +77,47 @@ func (wo *WebDavOption) startWebDav() bool { } } + // parse filer grpc address + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*wo.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var cipher bool + // connect to filer + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + break + } + } + ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: *wo.filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: grpcDialOption, Collection: *wo.collection, + Replication: *wo.replication, + DiskType: *wo.disk, Uid: uid, Gid: gid, + Cipher: cipher, + CacheDir: util.ResolvePath(*wo.cacheDir), + CacheSizeMB: *wo.cacheSizeMB, }) if webdavServer_err != nil { glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) @@ -93,12 +132,12 @@ func (wo *WebDavOption) startWebDav() bool { } if *wo.tlsPrivateKey != "" { - glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port) + glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port) if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil { glog.Fatalf("WebDav Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port) + glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port) if err = httpS.Serve(webDavListener); err != nil { glog.Fatalf("WebDav Server Fail to serve: %v", err) } diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go new file mode 100644 index 000000000..ab8f6bcbd --- /dev/null +++ b/weed/filer/abstract_sql/abstract_sql_store.go @@ -0,0 +1,364 @@ +package abstract_sql + +import ( + "context" + "database/sql" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" + "sync" +) + +type SqlGenerator interface { + GetSqlInsert(tableName string) string + 
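// The webdav startup above blocks until the filer answers GetFilerConfiguration,
// retrying once per second, and only then inherits the filer's Cipher setting.
// The wait-for-dependency pattern, reduced to its skeleton (probe is a
// hypothetical stand-in for the GetFilerConfiguration round trip):
for {
	if err := probe(); err != nil {
		glog.V(0).Infof("waiting for filer %s: %v", *wo.filer, err)
		time.Sleep(time.Second)
		continue
	}
	break
}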
GetSqlUpdate(tableName string) string + GetSqlFind(tableName string) string + GetSqlDelete(tableName string) string + GetSqlDeleteFolderChildren(tableName string) string + GetSqlListExclusive(tableName string) string + GetSqlListInclusive(tableName string) string + GetSqlCreateTable(tableName string) string + GetSqlDropTable(tableName string) string +} + +type AbstractSqlStore struct { + SqlGenerator + DB *sql.DB + SupportBucketTable bool + dbs map[string]bool + dbsLock sync.Mutex +} + +func (store *AbstractSqlStore) OnBucketCreation(bucket string) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + store.CreateTable(context.Background(), bucket) + + if store.dbs == nil { + return + } + store.dbs[bucket] = true +} +func (store *AbstractSqlStore) OnBucketDeletion(bucket string) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + store.deleteTable(context.Background(), bucket) + + if store.dbs == nil { + return + } + delete(store.dbs, bucket) +} + +const ( + DEFAULT_TABLE = "filemeta" +) + +type TxOrDB interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { + tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ + Isolation: sql.LevelReadCommitted, + ReadOnly: false, + }) + if err != nil { + return ctx, err + } + + return context.WithValue(ctx, "tx", tx), nil +} +func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Commit() + } + return nil +} +func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Rollback() + } + return nil +} + +func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) { + + shortPath = fullpath + bucket = DEFAULT_TABLE + + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + txOrDB = tx + } else { + txOrDB = store.DB + } + + if !store.SupportBucketTable { + return + } + + if !strings.HasPrefix(string(fullpath), "/buckets/") { + return + } + + // detect bucket + bucketAndObjectKey := string(fullpath)[len("/buckets/"):] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 && !isForChildren { + return + } + bucket = bucketAndObjectKey + shortPath = "/" + if t > 0 { + bucket = bucketAndObjectKey[:t] + shortPath = util.FullPath(bucketAndObjectKey[t:]) + } + + if isValidBucket(bucket) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + if store.dbs == nil { + store.dbs = make(map[string]bool) + } + + if _, found := store.dbs[bucket]; !found { + if err = store.CreateTable(ctx, bucket); err == nil { + store.dbs[bucket] = true + } + } + + } + + return +} + +func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", entry.FullPath, err) + } + + dir, name := shortPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + if len(entry.Chunks) > 50 { + meta = util.MaybeGzipData(meta) + } + + res, err := 
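// getTxOrDB above routes entries under /buckets/<bucket>/... to a per-bucket table
// (created lazily), while everything else shares the DEFAULT_TABLE "filemeta"; the
// SqlGenerator interface keeps the store dialect-agnostic. A hypothetical
// MySQL-flavored generator, with column names inferred from the ExecContext calls
// in this file:
type mysqlGenerator struct{}

func (g mysqlGenerator) GetSqlInsert(tableName string) string {
	return fmt.Sprintf("INSERT INTO `%s` (dirhash, name, directory, meta) VALUES (?, ?, ?, ?)", tableName)
}

func (g mysqlGenerator) GetSqlUpdate(tableName string) string {
	return fmt.Sprintf("UPDATE `%s` SET meta = ? WHERE dirhash = ? AND name = ? AND directory = ?", tableName)
}

func (g mysqlGenerator) GetSqlFind(tableName string) string {
	return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash = ? AND name = ? AND directory = ?", tableName)
}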
db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta) + if err == nil { + return + } + + if !strings.Contains(strings.ToLower(err.Error()), "duplicate") { + // return fmt.Errorf("insert: %s", err) + // skip this since the error can be in a different language + } + + // now the insert failed possibly due to duplication constraints + glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err) + + res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) + if err != nil { + return fmt.Errorf("upsert %s: %s", entry.FullPath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("upsert %s but no rows affected: %s", entry.FullPath, err) + } + return nil + +} + +func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", entry.FullPath, err) + } + + dir, name := shortPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) + if err != nil { + return fmt.Errorf("update %s: %s", entry.FullPath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err) + } + return nil +} + +func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false) + if err != nil { + return nil, fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir) + + var data []byte + if err := row.Scan(&data); err != nil { + if err == sql.ErrNoRows { + return nil, filer_pb.ErrNotFound + } + return nil, fmt.Errorf("find %s: %v", fullpath, err) + } + + entry := &filer.Entry{ + FullPath: fullpath, + } + if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + + res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir) + if err != nil { + return fmt.Errorf("delete %s: %s", fullpath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err) + } + + return nil +} + +func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + if isValidBucket(bucket) && shortPath == "/" { + if err = store.deleteTable(ctx, bucket); err == nil { + store.dbsLock.Lock() + delete(store.dbs, bucket) + store.dbsLock.Unlock() + return nil + } else { + return err + } + } + + glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), 
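// InsertEntry above implements upsert as "try INSERT, then retry as UPDATE on any
// failure", since matching the word "duplicate" in driver error text is unreliable
// across locales (hence the commented-out check). The pattern, condensed into a
// hypothetical helper built on the TxOrDB interface from this file:
func upsert(ctx context.Context, db TxOrDB, insertSQL, updateSQL string, insertArgs, updateArgs []interface{}) error {
	if _, err := db.ExecContext(ctx, insertSQL, insertArgs...); err == nil {
		return nil
	}
	// assume a duplicate-key conflict and fall back to UPDATE; surface its error
	_, err := db.ExecContext(ctx, updateSQL, updateArgs...)
	return err
}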
store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) + + res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath)) + if err != nil { + return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err) + } + + return nil +} + +func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true) + if err != nil { + return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err) + } + + sqlText := store.GetSqlListExclusive(bucket) + if includeStartFile { + sqlText = store.GetSqlListInclusive(bucket) + } + + rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1) + if err != nil { + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) + } + defer rows.Close() + + for rows.Next() { + var name string + var data []byte + if err = rows.Scan(&name, &data); err != nil { + glog.V(0).Infof("scan %s : %v", dirPath, err) + return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err) + } + lastFileName = name + + entry := &filer.Entry{ + FullPath: util.NewFullPath(string(dirPath), name), + } + if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { + glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) + return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) + } + + if !eachEntryFunc(entry) { + break + } + + } + + return lastFileName, nil +} + +func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", nil) +} + +func (store *AbstractSqlStore) Shutdown() { + store.DB.Close() +} + +func isValidBucket(bucket string) bool { + return bucket != DEFAULT_TABLE && bucket != "" +} + +func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error { + if !store.SupportBucketTable { + return nil + } + _, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket)) + return err +} + +func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error { + if !store.SupportBucketTable { + return nil + } + _, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket)) + return err +} diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go new file mode 100644 index 000000000..03b016c76 --- /dev/null +++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go @@ -0,0 +1,105 @@ +package abstract_sql + +import ( + "context" + "database/sql" + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" +) + +func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + db, _, _, err := store.getTxOrDB(ctx, "", false) + if err != nil { + return 
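// ListDirectoryPrefixedEntries above queries limit+1 rows and reports the last
// name it decoded, so callers can page by feeding lastFileName back in as
// startFileName. A hypothetical paging caller:
startFileName, includeStart := "", false
for {
	lastFileName, err := store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStart, 1024, "", func(entry *filer.Entry) bool {
		fmt.Println(entry.FullPath)
		return true // keep scanning this page
	})
	if err != nil || lastFileName == "" {
		break // error, or an empty page: done
	}
	startFileName = lastFileName
}
// Note that ListDirectoryEntries below forwards a nil eachEntryFunc to the
// prefixed variant; as written that would panic on the first row of a non-empty
// directory, which looks like an oversight worth checking against upstream.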
fmt.Errorf("findDB: %v", err) + } + + dirStr, dirHash, name := genDirAndName(key) + + res, err := db.ExecContext(ctx, store.GetSqlInsert(DEFAULT_TABLE), dirHash, name, dirStr, value) + if err == nil { + return + } + + if !strings.Contains(strings.ToLower(err.Error()), "duplicate") { + // return fmt.Errorf("kv insert: %s", err) + // skip this since the error can be in a different language + } + + // now the insert failed possibly due to duplication constraints + glog.V(1).Infof("kv insert falls back to update: %s", err) + + res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr) + if err != nil { + return fmt.Errorf("kv upsert: %s", err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("kv upsert no rows affected: %s", err) + } + return nil + +} + +func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + db, _, _, err := store.getTxOrDB(ctx, "", false) + if err != nil { + return nil, fmt.Errorf("findDB: %v", err) + } + + dirStr, dirHash, name := genDirAndName(key) + row := db.QueryRowContext(ctx, store.GetSqlFind(DEFAULT_TABLE), dirHash, name, dirStr) + + err = row.Scan(&value) + + if err == sql.ErrNoRows { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) { + + db, _, _, err := store.getTxOrDB(ctx, "", false) + if err != nil { + return fmt.Errorf("findDB: %v", err) + } + + dirStr, dirHash, name := genDirAndName(key) + + res, err := db.ExecContext(ctx, store.GetSqlDelete(DEFAULT_TABLE), dirHash, name, dirStr) + if err != nil { + return fmt.Errorf("kv delete: %s", err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("kv delete no rows affected: %s", err) + } + + return nil + +} + +func genDirAndName(key []byte) (dirStr string, dirHash int64, name string) { + for len(key) < 8 { + key = append(key, 0) + } + + dirHash = int64(util.BytesToUint64(key[:8])) + dirStr = base64.StdEncoding.EncodeToString(key[:8]) + name = base64.StdEncoding.EncodeToString(key[8:]) + + return +} diff --git a/weed/filer2/cassandra/README.txt b/weed/filer/cassandra/README.txt index 122c9c3f4..122c9c3f4 100644 --- a/weed/filer2/cassandra/README.txt +++ b/weed/filer/cassandra/README.txt diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go new file mode 100644 index 000000000..fd2ce91a6 --- /dev/null +++ b/weed/filer/cassandra/cassandra_store.go @@ -0,0 +1,212 @@ +package cassandra + +import ( + "context" + "fmt" + "github.com/gocql/gocql" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + filer.Stores = append(filer.Stores, &CassandraStore{}) +} + +type CassandraStore struct { + cluster *gocql.ClusterConfig + session *gocql.Session + superLargeDirectoryHash map[string]string +} + +func (store *CassandraStore) GetName() string { + return "cassandra" +} + +func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"keyspace"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetStringSlice(prefix+"superLargeDirectories"), + ) +} 
+ +func (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string, isSuperLargeDirectory bool) { + dirHash, isSuperLargeDirectory = store.superLargeDirectoryHash[dir] + return +} + +func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string) (err error) { + store.cluster = gocql.NewCluster(hosts...) + if username != "" && password != "" { + store.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password} + } + store.cluster.Keyspace = keyspace + store.cluster.Consistency = gocql.LocalQuorum + store.session, err = store.cluster.CreateSession() + if err != nil { + glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) + } + + // set directory hash + store.superLargeDirectoryHash = make(map[string]string) + existingHash := make(map[string]string) + for _, dir := range superLargeDirectories { + // adding dir hash to avoid duplicated names + dirHash := util.Md5String([]byte(dir))[:4] + store.superLargeDirectoryHash[dir] = dirHash + if existingDir, found := existingHash[dirHash]; found { + glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) + } + existingHash[dirHash] = dir + } + return +} + +func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *CassandraStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + dir, name := entry.FullPath.DirAndName() + if dirHash, ok := store.isSuperLargeDirectory(dir); ok { + dir, name = dirHash+name, "" + } + + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + if len(entry.Chunks) > 50 { + meta = util.MaybeGzipData(meta) + } + + if err := store.session.Query( + "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ", + dir, name, meta, entry.TtlSec).Exec(); err != nil { + return fmt.Errorf("insert %s: %s", entry.FullPath, err) + } + + return nil +} + +func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + + dir, name := fullpath.DirAndName() + if dirHash, ok := store.isSuperLargeDirectory(dir); ok { + dir, name = dirHash+name, "" + } + + var data []byte + if err := store.session.Query( + "SELECT meta FROM filemeta WHERE directory=? AND name=?", + dir, name).Consistency(gocql.One).Scan(&data); err != nil { + if err != gocql.ErrNotFound { + return nil, filer_pb.ErrNotFound + } + } + + if len(data) == 0 { + return nil, filer_pb.ErrNotFound + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + dir, name := fullpath.DirAndName() + if dirHash, ok := store.isSuperLargeDirectory(dir); ok { + dir, name = dirHash+name, "" + } + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=? 
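// For any directory listed in superLargeDirectories, the Cassandra store rewrites
// (dir, name) into (md5(dir)[:4] + name, ""), so the children of one enormous
// directory land in many partitions instead of one wide row. The remapping in
// isolation (values are illustrative):
dir, name := "/data/huge", "object123"
if dirHash, ok := store.isSuperLargeDirectory(dir); ok {
	dir, name = dirHash+name, "" // partition key now varies per child
}
// The trade-off is visible further down: such directories can no longer be
// listed, and DeleteFolderChildren becomes a no-op for them.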
AND name=?", + dir, name).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok { + return nil // filer.ErrUnsupportedSuperLargeDirectoryListing + } + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=?", + fullpath).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok { + return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing + } + + cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" + if includeStartFile { + cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?" + } + + var data []byte + var name string + iter := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter() + for iter.Scan(&name, &data) { + entry := &filer.Entry{ + FullPath: util.NewFullPath(string(dirPath), name), + } + lastFileName = name + if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + if !eachEntryFunc(entry) { + break + } + } + if err := iter.Close(); err != nil { + glog.V(0).Infof("list iterator close: %v", err) + } + + return lastFileName, err +} + +func (store *CassandraStore) Shutdown() { + store.session.Close() +} diff --git a/weed/filer/cassandra/cassandra_store_kv.go b/weed/filer/cassandra/cassandra_store_kv.go new file mode 100644 index 000000000..dafa9bb15 --- /dev/null +++ b/weed/filer/cassandra/cassandra_store_kv.go @@ -0,0 +1,62 @@ +package cassandra + +import ( + "context" + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/gocql/gocql" +) + +func (store *CassandraStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + dir, name := genDirAndName(key) + + if err := store.session.Query( + "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ", + dir, name, value, 0).Exec(); err != nil { + return fmt.Errorf("kv insert: %s", err) + } + + return nil +} + +func (store *CassandraStore) KvGet(ctx context.Context, key []byte) (data []byte, err error) { + dir, name := genDirAndName(key) + + if err := store.session.Query( + "SELECT meta FROM filemeta WHERE directory=? AND name=?", + dir, name).Consistency(gocql.One).Scan(&data); err != nil { + if err != gocql.ErrNotFound { + return nil, filer.ErrKvNotFound + } + } + + if len(data) == 0 { + return nil, filer.ErrKvNotFound + } + + return data, nil +} + +func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err error) { + dir, name := genDirAndName(key) + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=? 
AND name=?", + dir, name).Exec(); err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} + +func genDirAndName(key []byte) (dir string, name string) { + for len(key) < 8 { + key = append(key, 0) + } + + dir = base64.StdEncoding.EncodeToString(key[:8]) + name = base64.StdEncoding.EncodeToString(key[8:]) + + return +} diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go new file mode 100644 index 000000000..9ef2f3e0f --- /dev/null +++ b/weed/filer/configuration.go @@ -0,0 +1,93 @@ +package filer + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "os" + "reflect" + "strings" +) + +var ( + Stores []FilerStore +) + +func (f *Filer) LoadConfiguration(config *util.ViperProxy) { + + validateOneEnabledStore(config) + + // load configuration for default filer store + hasDefaultStoreConfigured := false + for _, store := range Stores { + if config.GetBool(store.GetName() + ".enabled") { + store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) + if err := store.Initialize(config, store.GetName()+"."); err != nil { + glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) + } + f.SetStore(store) + glog.V(0).Infof("configured filer store to %s", store.GetName()) + hasDefaultStoreConfigured = true + break + } + } + + if !hasDefaultStoreConfigured { + println() + println("Supported filer stores are:") + for _, store := range Stores { + println(" " + store.GetName()) + } + os.Exit(-1) + } + + // load path-specific filer store here + // f.Store.AddPathSpecificStore(path, store) + storeNames := make(map[string]FilerStore) + for _, store := range Stores { + storeNames[store.GetName()] = store + } + allKeys := config.AllKeys() + for _, key := range allKeys { + if !strings.HasSuffix(key, ".enabled") { + continue + } + key = key[:len(key)-len(".enabled")] + if !strings.Contains(key, ".") { + continue + } + + parts := strings.Split(key, ".") + storeName, storeId := parts[0], parts[1] + + store, found := storeNames[storeName] + if !found { + continue + } + store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) + if err := store.Initialize(config, key+"."); err != nil { + glog.Fatalf("Failed to initialize store for %s: %+v", key, err) + } + location := config.GetString(key + ".location") + if location == "" { + glog.Errorf("path-specific filer store needs %s", key+".location") + os.Exit(-1) + } + f.Store.AddPathSpecificStore(location, storeId, store) + + glog.V(0).Infof("configure filer %s for %s", store.GetName(), location) + } + +} + +func validateOneEnabledStore(config *util.ViperProxy) { + enabledStore := "" + for _, store := range Stores { + if config.GetBool(store.GetName() + ".enabled") { + if enabledStore == "" { + enabledStore = store.GetName() + } else { + glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) + } + } + } +} diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go new file mode 100644 index 000000000..a16e5ebca --- /dev/null +++ b/weed/filer/elastic/v7/elastic_store.go @@ -0,0 +1,307 @@ +package elastic + +import ( + "context" + "fmt" + "math" + "strings" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" + jsoniter "github.com/json-iterator/go" + elastic "github.com/olivere/elastic/v7" +) + +var ( + indexType 
= "_doc" + indexPrefix = ".seaweedfs_" + indexKV = ".seaweedfs_kv_entries" + kvMappings = ` { + "mappings": { + "enabled": false, + "properties": { + "Value":{ + "type": "binary" + } + } + } + }` +) + +type ESEntry struct { + ParentId string `json:"ParentId"` + Entry *filer.Entry +} + +type ESKVEntry struct { + Value []byte `json:"Value"` +} + +func init() { + filer.Stores = append(filer.Stores, &ElasticStore{}) +} + +type ElasticStore struct { + client *elastic.Client + maxPageSize int +} + +func (store *ElasticStore) GetName() string { + return "elastic7" +} + +func (store *ElasticStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + options := []elastic.ClientOptionFunc{} + servers := configuration.GetStringSlice(prefix + "servers") + options = append(options, elastic.SetURL(servers...)) + username := configuration.GetString(prefix + "username") + password := configuration.GetString(prefix + "password") + if username != "" && password != "" { + options = append(options, elastic.SetBasicAuth(username, password)) + } + options = append(options, elastic.SetSniff(configuration.GetBool(prefix+"sniff_enabled"))) + options = append(options, elastic.SetHealthcheck(configuration.GetBool(prefix+"healthcheck_enabled"))) + store.maxPageSize = configuration.GetInt(prefix + "index.max_result_window") + if store.maxPageSize <= 0 { + store.maxPageSize = 10000 + } + glog.Infof("filer store elastic endpoints: %v.", servers) + return store.initialize(options) +} + +func (store *ElasticStore) initialize(options []elastic.ClientOptionFunc) (err error) { + ctx := context.Background() + store.client, err = elastic.NewClient(options...) + if err != nil { + return fmt.Errorf("init elastic %v.", err) + } + if ok, err := store.client.IndexExists(indexKV).Do(ctx); err == nil && !ok { + _, err = store.client.CreateIndex(indexKV).Body(kvMappings).Do(ctx) + if err != nil { + return fmt.Errorf("create index(%s) %v.", indexKV, err) + } + } + return nil +} + +func (store *ElasticStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *ElasticStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *ElasticStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + index := getIndex(entry.FullPath, false) + dir, _ := entry.FullPath.DirAndName() + id := weed_util.Md5String([]byte(entry.FullPath)) + esEntry := &ESEntry{ + ParentId: weed_util.Md5String([]byte(dir)), + Entry: entry, + } + value, err := jsoniter.Marshal(esEntry) + if err != nil { + glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + return fmt.Errorf("insert entry %v.", err) + } + _, err = store.client.Index(). + Index(index). + Type(indexType). + Id(id). + BodyJson(string(value)). 
+ Do(ctx) + if err != nil { + glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + return fmt.Errorf("insert entry %v.", err) + } + return nil +} + +func (store *ElasticStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + return store.InsertEntry(ctx, entry) +} + +func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { + index := getIndex(fullpath, false) + id := weed_util.Md5String([]byte(fullpath)) + searchResult, err := store.client.Get(). + Index(index). + Type(indexType). + Id(id). + Do(ctx) + if elastic.IsNotFound(err) { + return nil, filer_pb.ErrNotFound + } + if searchResult != nil && searchResult.Found { + esEntry := &ESEntry{ + ParentId: "", + Entry: &filer.Entry{}, + } + err := jsoniter.Unmarshal(searchResult.Source, esEntry) + return esEntry.Entry, err + } + glog.Errorf("find entry(%s),%v.", string(fullpath), err) + return nil, filer_pb.ErrNotFound +} + +func (store *ElasticStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + index := getIndex(fullpath, false) + id := weed_util.Md5String([]byte(fullpath)) + if strings.Count(string(fullpath), "/") == 1 { + return store.deleteIndex(ctx, index) + } + return store.deleteEntry(ctx, index, id) +} + +func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err error) { + deleteResult, err := store.client.DeleteIndex(index).Do(ctx) + if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) { + return nil + } + glog.Errorf("delete index(%s) %v.", index, err) + return err +} + +func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (err error) { + deleteResult, err := store.client.Delete(). + Index(index). + Type(indexType). + Id(id). 
+ Do(ctx) + if err == nil { + if deleteResult.Result == "deleted" || deleteResult.Result == "not_found" { + return nil + } + } + glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) + return fmt.Errorf("delete entry %v.", err) +} + +func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool { + if err := store.DeleteEntry(ctx, entry.FullPath); err != nil { + glog.Errorf("elastic delete %s: %v.", entry.FullPath, err) + return false + } + return true + }) + return +} + +func (store *ElasticStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.listDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc) +} + +func (store *ElasticStore) listDirectoryEntries( + ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + first := true + index := getIndex(fullpath, true) + nextStart := "" + parentId := weed_util.Md5String([]byte(fullpath)) + if _, err = store.client.Refresh(index).Do(ctx); err != nil { + if elastic.IsNotFound(err) { + store.client.CreateIndex(index).Do(ctx) + return + } + } + for { + result := &elastic.SearchResult{} + if (startFileName == "" && first) || inclusive { + if result, err = store.search(ctx, index, parentId); err != nil { + glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + return + } + } else { + fullPath := string(fullpath) + "/" + startFileName + if !first { + fullPath = nextStart + } + after := weed_util.Md5String([]byte(fullPath)) + if result, err = store.searchAfter(ctx, index, parentId, after); err != nil { + glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + return + } + } + first = false + for _, hit := range result.Hits.Hits { + esEntry := &ESEntry{ + ParentId: "", + Entry: &filer.Entry{}, + } + if err := jsoniter.Unmarshal(hit.Source, esEntry); err == nil { + limit-- + if limit < 0 { + return lastFileName, nil + } + nextStart = string(esEntry.Entry.FullPath) + fileName := esEntry.Entry.FullPath.Name() + if fileName == startFileName && !inclusive { + continue + } + if !eachEntryFunc(esEntry.Entry) { + break + } + lastFileName = fileName + } + } + if len(result.Hits.Hits) < store.maxPageSize { + break + } + } + return +} + +func (store *ElasticStore) search(ctx context.Context, index, parentId string) (result *elastic.SearchResult, err error) { + if count, err := store.client.Count(index).Do(ctx); err == nil && count == 0 { + return &elastic.SearchResult{ + Hits: &elastic.SearchHits{ + Hits: make([]*elastic.SearchHit, 0)}, + }, nil + } + queryResult, err := store.client.Search(). + Index(index). + Query(elastic.NewMatchQuery("ParentId", parentId)). + Size(store.maxPageSize). + Sort("_id", false). + Do(ctx) + return queryResult, err +} + +func (store *ElasticStore) searchAfter(ctx context.Context, index, parentId, after string) (result *elastic.SearchResult, err error) { + queryResult, err := store.client.Search(). + Index(index). + Query(elastic.NewMatchQuery("ParentId", parentId)). + SearchAfter(after). + Size(store.maxPageSize). + Sort("_id", false). 
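// listDirectoryEntries above pages through children with Elasticsearch's
// search_after, keyed on _id (the md5 of each child's full path), and stops once a
// page comes back shorter than maxPageSize. The driving loop reduced to a
// skeleton, with fetchPage and process as hypothetical stand-ins:
after := ""
for {
	hits := fetchPage(parentId, after) // search() first, then searchAfter()
	for _, hit := range hits {
		process(hit)
		after = hit.Id // _id doubles as the pagination cursor
	}
	if len(hits) < maxPageSize {
		break
	}
}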
+ Do(ctx) + return queryResult, err + +} + +func (store *ElasticStore) Shutdown() { + store.client.Stop() +} + +func getIndex(fullpath weed_util.FullPath, isDirectory bool) string { + path := strings.Split(string(fullpath), "/") + if isDirectory && len(path) >= 2 { + return indexPrefix + strings.ToLower(path[1]) + } + if len(path) > 2 { + return indexPrefix + strings.ToLower(path[1]) + } + if len(path) == 2 { + return indexPrefix + } + return "" +} diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go new file mode 100644 index 000000000..99c03314e --- /dev/null +++ b/weed/filer/elastic/v7/elastic_store_kv.go @@ -0,0 +1,65 @@ +package elastic + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + + "github.com/chrislusf/seaweedfs/weed/glog" + jsoniter "github.com/json-iterator/go" + elastic "github.com/olivere/elastic/v7" +) + +func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) { + deleteResult, err := store.client.Delete(). + Index(indexKV). + Type(indexType). + Id(string(key)). + Do(ctx) + if err == nil { + if deleteResult.Result == "deleted" || deleteResult.Result == "not_found" { + return nil + } + } + glog.Errorf("delete key(id:%s) %v.", string(key), err) + return fmt.Errorf("delete key %v.", err) +} + +func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + searchResult, err := store.client.Get(). + Index(indexKV). + Type(indexType). + Id(string(key)). + Do(ctx) + if elastic.IsNotFound(err) { + return value, filer.ErrKvNotFound + } + if searchResult != nil && searchResult.Found { + esEntry := &ESKVEntry{} + if err := jsoniter.Unmarshal(searchResult.Source, esEntry); err == nil { + return esEntry.Value, nil + } + } + glog.Errorf("find key(%s),%v.", string(key), err) + return value, filer.ErrKvNotFound +} + +func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + esEntry := &ESKVEntry{value} + val, err := jsoniter.Marshal(esEntry) + if err != nil { + glog.Errorf("insert key(%s) %v.", string(key), err) + return fmt.Errorf("insert key %v.", err) + } + _, err = store.client.Index(). + Index(indexKV). + Type(indexType). + Id(string(key)). + BodyJson(string(val)). 
+ Do(ctx) + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + return nil +} diff --git a/weed/filer/entry.go b/weed/filer/entry.go new file mode 100644 index 000000000..b7c8370e6 --- /dev/null +++ b/weed/filer/entry.go @@ -0,0 +1,113 @@ +package filer + +import ( + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type Attr struct { + Mtime time.Time // time of last modification + Crtime time.Time // time of creation (OS X only) + Mode os.FileMode // file mode + Uid uint32 // owner uid + Gid uint32 // group gid + Mime string // mime type + Replication string // replication + Collection string // collection name + TtlSec int32 // ttl in seconds + DiskType string + UserName string + GroupNames []string + SymlinkTarget string + Md5 []byte + FileSize uint64 +} + +func (attr Attr) IsDirectory() bool { + return attr.Mode&os.ModeDir > 0 +} + +type Entry struct { + util.FullPath + + Attr + Extended map[string][]byte + + // the following is for files + Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` + + HardLinkId HardLinkId + HardLinkCounter int32 + Content []byte +} + +func (entry *Entry) Size() uint64 { + return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content))) +} + +func (entry *Entry) Timestamp() time.Time { + if entry.IsDirectory() { + return entry.Crtime + } else { + return entry.Mtime + } +} + +func (entry *Entry) ToProtoEntry() *filer_pb.Entry { + if entry == nil { + return nil + } + return &filer_pb.Entry{ + Name: entry.FullPath.Name(), + IsDirectory: entry.IsDirectory(), + Attributes: EntryAttributeToPb(entry), + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, + } +} + +func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { + if entry == nil { + return nil + } + dir, _ := entry.FullPath.DirAndName() + return &filer_pb.FullEntry{ + Dir: dir, + Entry: entry.ToProtoEntry(), + } +} + +func (entry *Entry) Clone() *Entry { + return &Entry{ + FullPath: entry.FullPath, + Attr: entry.Attr, + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + } +} + +func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry { + return &Entry{ + FullPath: util.NewFullPath(dir, entry.Name), + Attr: PbToEntryAttribute(entry.Attributes), + Chunks: entry.Chunks, + HardLinkId: HardLinkId(entry.HardLinkId), + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, + } +} + +func maxUint64(x, y uint64) uint64 { + if x > y { + return x + } + return y +} diff --git a/weed/filer2/entry_codec.go b/weed/filer/entry_codec.go index 3a2dc6134..4c613f068 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer/entry_codec.go @@ -1,4 +1,4 @@ -package filer2 +package filer import ( "bytes" @@ -13,9 +13,12 @@ import ( func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { message := &filer_pb.Entry{ - Attributes: EntryAttributeToPb(entry), - Chunks: entry.Chunks, - Extended: entry.Extended, + Attributes: EntryAttributeToPb(entry), + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, } return proto.Marshal(message) } @@ -34,6 +37,10 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { entry.Chunks = message.Chunks + entry.HardLinkId = message.HardLinkId + entry.HardLinkCounter = 
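// EncodeAttributesAndChunks above is the single serialization point for entries;
// several stores in this patch additionally gzip the result once an entry carries
// more than 50 chunks, and symmetrically call MaybeDecompressData on every read
// (which passes uncompressed bytes through unchanged). The write-side shape:
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
	return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 { // the patch's own threshold for chunk-heavy entries
	meta = util.MaybeGzipData(meta)
}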
message.HardLinkCounter + entry.Content = message.Content + return nil } @@ -49,9 +56,12 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes { Collection: entry.Attr.Collection, Replication: entry.Attr.Replication, TtlSec: entry.Attr.TtlSec, + DiskType: entry.Attr.DiskType, UserName: entry.Attr.UserName, GroupName: entry.Attr.GroupNames, SymlinkTarget: entry.Attr.SymlinkTarget, + Md5: entry.Attr.Md5, + FileSize: entry.Attr.FileSize, } } @@ -59,6 +69,10 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr { t := Attr{} + if attr == nil { + return t + } + t.Crtime = time.Unix(attr.Crtime, 0) t.Mtime = time.Unix(attr.Mtime, 0) t.Mode = os.FileMode(attr.FileMode) @@ -68,9 +82,12 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr { t.Collection = attr.Collection t.Replication = attr.Replication t.TtlSec = attr.TtlSec + t.DiskType = attr.DiskType t.UserName = attr.UserName t.GroupNames = attr.GroupName t.SymlinkTarget = attr.SymlinkTarget + t.Md5 = attr.Md5 + t.FileSize = attr.FileSize return t } @@ -93,11 +110,25 @@ func EqualEntry(a, b *Entry) bool { return false } + if !bytes.Equal(a.Md5, b.Md5) { + return false + } + for i := 0; i < len(a.Chunks); i++ { if !proto.Equal(a.Chunks[i], b.Chunks[i]) { return false } } + + if !bytes.Equal(a.HardLinkId, b.HardLinkId) { + return false + } + if a.HardLinkCounter != b.HardLinkCounter { + return false + } + if !bytes.Equal(a.Content, b.Content) { + return false + } return true } diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go index 2eb9e3e86..71ed738f9 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer/etcd/etcd_store.go @@ -1,15 +1,18 @@ package etcd import ( + "bytes" "context" "fmt" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "go.etcd.io/etcd/clientv3" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" - "go.etcd.io/etcd/clientv3" ) const ( @@ -17,7 +20,7 @@ const ( ) func init() { - filer2.Stores = append(filer2.Stores, &EtcdStore{}) + filer.Stores = append(filer.Stores, &EtcdStore{}) } type EtcdStore struct { @@ -28,13 +31,13 @@ func (store *EtcdStore) GetName() string { return "etcd" } -func (store *EtcdStore) Initialize(configuration weed_util.Configuration) (err error) { - servers := configuration.GetString("servers") +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + servers := configuration.GetString(prefix + "servers") if servers == "" { servers = "localhost:2379" } - timeout := configuration.GetString("timeout") + timeout := configuration.GetString(prefix + "timeout") if timeout == "" { timeout = "3s" } @@ -71,41 +74,45 @@ func (store *EtcdStore) RollbackTransaction(ctx context.Context) error { return nil } -func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { key := genKey(entry.DirAndName()) - value, err := entry.EncodeAttributesAndChunks() + meta, err := entry.EncodeAttributesAndChunks() if err != nil { return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if _, err := store.client.Put(ctx, string(key), string(value)); err != nil { + if len(entry.Chunks) > 50 { + meta = weed_util.MaybeGzipData(meta) + } + + if _, err := store.client.Put(ctx, string(key), string(meta)); err != nil { return 
fmt.Errorf("persisting %s : %v", entry.FullPath, err) } return nil } -func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { key := genKey(fullpath.DirAndName()) resp, err := store.client.Get(ctx, string(key)) if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + return nil, fmt.Errorf("get %s : %v", fullpath, err) } if len(resp.Kvs) == 0 { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value) + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(resp.Kvs[0].Value)) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -113,7 +120,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) return entry, nil } -func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) if _, err := store.client.Delete(ctx, string(key)); err != nil { @@ -123,7 +130,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } -func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { @@ -133,41 +140,53 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer return nil } -func (store *EtcdStore) ListDirectoryEntries( - ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int, -) (entries []*filer2.Entry, err error) { - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") +func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} - resp, err := store.client.Get(ctx, string(directoryPrefix), +func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + directoryPrefix := genDirectoryKeyPrefix(dirPath, "") + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) + } + + resp, err := store.client.Get(ctx, string(lastFileStart), clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend)) if err != nil { - return nil, fmt.Errorf("list %s : %v", fullpath, err) + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) } for _, kv := range resp.Kvs { + if !bytes.HasPrefix(kv.Key, 
directoryPrefix) { + break + } fileName := getNameFromKey(kv.Key) if fileName == "" { continue } - if fileName == startFileName && !inclusive { + if fileName == startFileName && !includeStartFile { continue } limit-- if limit < 0 { break } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), } - if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil { + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil { err = decodeErr glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } - entries = append(entries, entry) + if !eachEntryFunc(entry) { + break + } + lastFileName = fileName } - return entries, err + return lastFileName, err } func genKey(dirPath, fileName string) (key []byte) { @@ -177,7 +196,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { @@ -194,3 +213,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *EtcdStore) Shutdown() { + store.client.Close() +} diff --git a/weed/filer/etcd/etcd_store_kv.go b/weed/filer/etcd/etcd_store_kv.go new file mode 100644 index 000000000..df252f46c --- /dev/null +++ b/weed/filer/etcd/etcd_store_kv.go @@ -0,0 +1,44 @@ +package etcd + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" +) + +func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + _, err = store.client.Put(ctx, string(key), string(value)) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + resp, err := store.client.Get(ctx, string(key)) + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + if len(resp.Kvs) == 0 { + return nil, filer.ErrKvNotFound + } + + return resp.Kvs[0].Value, nil +} + +func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) { + + _, err = store.client.Delete(ctx, string(key)) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go new file mode 100644 index 000000000..c709dc819 --- /dev/null +++ b/weed/filer/filechunk_manifest.go @@ -0,0 +1,194 @@ +package filer + +import ( + "bytes" + "fmt" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "io" + "math" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + ManifestBatch = 1000 +) + +func HasChunkManifest(chunks []*filer_pb.FileChunk) bool { + for _, chunk := range chunks { + if chunk.IsChunkManifest { + return true + } + } + return false +} + +func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonManifestChunks []*filer_pb.FileChunk) { + for _, c := range chunks { + if c.IsChunkManifest { + manifestChunks = append(manifestChunks, c) + } else { + nonManifestChunks = append(nonManifestChunks, c) + } + } + return +} + +func 
ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) { + // TODO maybe parallel this + for _, chunk := range chunks { + if !chunk.IsChunkManifest { + dataChunks = append(dataChunks, chunk) + continue + } + + resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk) + if err != nil { + return chunks, nil, err + } + + manifestChunks = append(manifestChunks, chunk) + // recursive + dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks) + if subErr != nil { + return chunks, nil, subErr + } + dataChunks = append(dataChunks, dchunks...) + manifestChunks = append(manifestChunks, mchunks...) + } + return +} + +func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) { + if !chunk.IsChunkManifest { + return + } + + // IsChunkManifest + data, err := fetchChunk(lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed) + if err != nil { + return nil, fmt.Errorf("fail to read manifest %s: %v", chunk.GetFileIdString(), err) + } + m := &filer_pb.FileChunkManifest{} + if err := proto.Unmarshal(data, m); err != nil { + return nil, fmt.Errorf("fail to unmarshal manifest %s: %v", chunk.GetFileIdString(), err) + } + + // recursive + filer_pb.AfterEntryDeserialization(m.Chunks) + return m.Chunks, nil +} + +// TODO fetch from cache for weed mount? +func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) { + urlStrings, err := lookupFileIdFn(fileId) + if err != nil { + glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) + return nil, err + } + return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0) +} + +func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) ([]byte, error) { + + var err error + var shouldRetry bool + receivedData := make([]byte, 0, size) + + for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 { + for _, urlString := range urlStrings { + receivedData = receivedData[:0] + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) { + receivedData = append(receivedData, data...) 
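// Editor's note: the callback above appends bytes as they stream in;
// receivedData is reset to length zero (capacity retained) at the top of
// each attempt, so a failed partial read never leaks into the next try.
// The enclosing loop backs off geometrically, waitTime growing by half of
// itself per round (1s, 1.5s, 2.25s, ...) up to util.RetryWaitTime.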
+ }) + if !shouldRetry { + break + } + if err != nil { + glog.V(0).Infof("read %s failed, err: %v", urlString, err) + } else { + break + } + } + if err != nil && shouldRetry { + glog.V(0).Infof("retry reading in %v", waitTime) + time.Sleep(waitTime) + } else { + break + } + } + + return receivedData, err + +} + +func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) { + return doMaybeManifestize(saveFunc, inputChunks, ManifestBatch, mergeIntoManifest) +} + +func doMaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk, mergeFactor int, mergefn func(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error)) (chunks []*filer_pb.FileChunk, err error) { + + var dataChunks []*filer_pb.FileChunk + for _, chunk := range inputChunks { + if !chunk.IsChunkManifest { + dataChunks = append(dataChunks, chunk) + } else { + chunks = append(chunks, chunk) + } + } + + remaining := len(dataChunks) + for i := 0; i+mergeFactor <= len(dataChunks); i += mergeFactor { + chunk, err := mergefn(saveFunc, dataChunks[i:i+mergeFactor]) + if err != nil { + return dataChunks, err + } + chunks = append(chunks, chunk) + remaining -= mergeFactor + } + // remaining + for i := len(dataChunks) - remaining; i < len(dataChunks); i++ { + chunks = append(chunks, dataChunks[i]) + } + return +} + +func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) { + + filer_pb.BeforeEntrySerialization(dataChunks) + + // create and serialize the manifest + data, serErr := proto.Marshal(&filer_pb.FileChunkManifest{ + Chunks: dataChunks, + }) + if serErr != nil { + return nil, fmt.Errorf("serializing manifest: %v", serErr) + } + + minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64) + for _, chunk := range dataChunks { + if minOffset > int64(chunk.Offset) { + minOffset = chunk.Offset + } + if maxOffset < int64(chunk.Size)+chunk.Offset { + maxOffset = int64(chunk.Size) + chunk.Offset + } + } + + manifestChunk, _, _, err = saveFunc(bytes.NewReader(data), "", 0) + if err != nil { + return nil, err + } + manifestChunk.IsChunkManifest = true + manifestChunk.Offset = minOffset + manifestChunk.Size = uint64(maxOffset - minOffset) + + return +} + +type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) diff --git a/weed/filer/filechunk_manifest_test.go b/weed/filer/filechunk_manifest_test.go new file mode 100644 index 000000000..ce12c5da6 --- /dev/null +++ b/weed/filer/filechunk_manifest_test.go @@ -0,0 +1,113 @@ +package filer + +import ( + "bytes" + "math" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func TestDoMaybeManifestize(t *testing.T) { + var manifestTests = []struct { + inputs []*filer_pb.FileChunk + expected []*filer_pb.FileChunk + }{ + { + inputs: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: false}, + {FileId: "2", IsChunkManifest: false}, + {FileId: "3", IsChunkManifest: false}, + {FileId: "4", IsChunkManifest: false}, + }, + expected: []*filer_pb.FileChunk{ + {FileId: "12", IsChunkManifest: true}, + {FileId: "34", IsChunkManifest: true}, + }, + }, + { + inputs: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: true}, + {FileId: "2", IsChunkManifest: false}, + {FileId: "3", 
IsChunkManifest: false}, + {FileId: "4", IsChunkManifest: false}, + }, + expected: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: true}, + {FileId: "23", IsChunkManifest: true}, + {FileId: "4", IsChunkManifest: false}, + }, + }, + { + inputs: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: false}, + {FileId: "2", IsChunkManifest: true}, + {FileId: "3", IsChunkManifest: false}, + {FileId: "4", IsChunkManifest: false}, + }, + expected: []*filer_pb.FileChunk{ + {FileId: "2", IsChunkManifest: true}, + {FileId: "13", IsChunkManifest: true}, + {FileId: "4", IsChunkManifest: false}, + }, + }, + { + inputs: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: true}, + {FileId: "2", IsChunkManifest: true}, + {FileId: "3", IsChunkManifest: false}, + {FileId: "4", IsChunkManifest: false}, + }, + expected: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: true}, + {FileId: "2", IsChunkManifest: true}, + {FileId: "34", IsChunkManifest: true}, + }, + }, + } + + for i, mtest := range manifestTests { + println("test", i) + actual, _ := doMaybeManifestize(nil, mtest.inputs, 2, mockMerge) + assertEqualChunks(t, mtest.expected, actual) + } + +} + +func assertEqualChunks(t *testing.T, expected, actual []*filer_pb.FileChunk) { + assert.Equal(t, len(expected), len(actual)) + for i := 0; i < len(actual); i++ { + assertEqualChunk(t, actual[i], expected[i]) + } +} +func assertEqualChunk(t *testing.T, expected, actual *filer_pb.FileChunk) { + assert.Equal(t, expected.FileId, actual.FileId) + assert.Equal(t, expected.IsChunkManifest, actual.IsChunkManifest) +} + +func mockMerge(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) { + + var buf bytes.Buffer + minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64) + for k := 0; k < len(dataChunks); k++ { + chunk := dataChunks[k] + buf.WriteString(chunk.FileId) + if minOffset > int64(chunk.Offset) { + minOffset = chunk.Offset + } + if maxOffset < int64(chunk.Size)+chunk.Offset { + maxOffset = int64(chunk.Size) + chunk.Offset + } + } + + manifestChunk = &filer_pb.FileChunk{ + FileId: buf.String(), + } + manifestChunk.IsChunkManifest = true + manifestChunk.Offset = minOffset + manifestChunk.Size = uint64(maxOffset - minOffset) + + return +} diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go new file mode 100644 index 000000000..68f308a51 --- /dev/null +++ b/weed/filer/filechunks.go @@ -0,0 +1,292 @@ +package filer + +import ( + "bytes" + "encoding/hex" + "fmt" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "math" + "sort" + "sync" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) { + for _, c := range chunks { + t := uint64(c.Offset + int64(c.Size)) + if size < t { + size = t + } + } + return +} + +func FileSize(entry *filer_pb.Entry) (size uint64) { + return maxUint64(TotalSize(entry.Chunks), entry.Attributes.FileSize) +} + +func ETag(entry *filer_pb.Entry) (etag string) { + if entry.Attributes == nil || entry.Attributes.Md5 == nil { + return ETagChunks(entry.Chunks) + } + return fmt.Sprintf("%x", entry.Attributes.Md5) +} + +func ETagEntry(entry *Entry) (etag string) { + if entry.Attr.Md5 == nil { + return ETagChunks(entry.Chunks) + } + return fmt.Sprintf("%x", entry.Attr.Md5) +} + +func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) { + if len(chunks) == 1 { + return chunks[0].ETag + } + md5_digests := [][]byte{} + for _, c := 
range chunks { + md5_decoded, _ := hex.DecodeString(c.ETag) + md5_digests = append(md5_digests, md5_decoded) + } + return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks)) +} + +func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) { + + visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks) + + fileIds := make(map[string]bool) + for _, interval := range visibles { + fileIds[interval.fileId] = true + } + for _, chunk := range chunks { + if _, found := fileIds[chunk.GetFileIdString()]; found { + compacted = append(compacted, chunk) + } else { + garbage = append(garbage, chunk) + } + } + + return +} + +func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) { + + aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as) + if aErr != nil { + return nil, aErr + } + bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs) + if bErr != nil { + return nil, bErr + } + + delta = append(delta, DoMinusChunks(aData, bData)...) + delta = append(delta, DoMinusChunks(aMeta, bMeta)...) + return +} + +func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { + + fileIds := make(map[string]bool) + for _, interval := range bs { + fileIds[interval.GetFileIdString()] = true + } + for _, chunk := range as { + if _, found := fileIds[chunk.GetFileIdString()]; !found { + delta = append(delta, chunk) + } + } + + return +} + +type ChunkView struct { + FileId string + Offset int64 + Size uint64 + LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk + ChunkSize uint64 + CipherKey []byte + IsGzipped bool +} + +func (cv *ChunkView) IsFullChunk() bool { + return cv.Size == cv.ChunkSize +} + +func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) { + + visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks) + + return ViewFromVisibleIntervals(visibles, offset, size) + +} + +func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) { + + stop := offset + size + if size == math.MaxInt64 { + stop = math.MaxInt64 + } + if stop < offset { + stop = math.MaxInt64 + } + + for _, chunk := range visibles { + + chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop) + + if chunkStart < chunkStop { + views = append(views, &ChunkView{ + FileId: chunk.fileId, + Offset: chunkStart - chunk.start + chunk.chunkOffset, + Size: uint64(chunkStop - chunkStart), + LogicOffset: chunkStart, + ChunkSize: chunk.chunkSize, + CipherKey: chunk.cipherKey, + IsGzipped: chunk.isGzipped, + }) + } + } + + return views + +} + +func logPrintf(name string, visibles []VisibleInterval) { + + /* + glog.V(0).Infof("%s len %d", name, len(visibles)) + for _, v := range visibles { + glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset) + } + */ +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return new(VisibleInterval) + }, +} + +func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) { + + newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed) + + length := len(visibles) + if length == 0 { + 
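// Editor's note: fast path — with no existing intervals the new chunk is
// appended as-is; the check just below likewise appends without splitting
// when the chunk starts at or past the current end of the file.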
return append(visibles, newV) + } + last := visibles[length-1] + if last.stop <= chunk.Offset { + return append(visibles, newV) + } + + logPrintf(" before", visibles) + // glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size) + chunkStop := chunk.Offset + int64(chunk.Size) + for _, v := range visibles { + if v.start < chunk.Offset && chunk.Offset < v.stop { + t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped) + newVisibles = append(newVisibles, t) + // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop) + } + if v.start < chunkStop && chunkStop < v.stop { + t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped) + newVisibles = append(newVisibles, t) + // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop) + } + if chunkStop <= v.start || v.stop <= chunk.Offset { + newVisibles = append(newVisibles, v) + // glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop) + } + } + newVisibles = append(newVisibles, newV) + + logPrintf(" append", newVisibles) + + for i := len(newVisibles) - 1; i >= 0; i-- { + if i > 0 && newV.start < newVisibles[i-1].start { + newVisibles[i] = newVisibles[i-1] + } else { + newVisibles[i] = newV + break + } + } + logPrintf(" sorted", newVisibles) + + return newVisibles +} + +// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory +// If the file chunk content is a chunk manifest +func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) { + + chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks) + + sort.Slice(chunks, func(i, j int) bool { + if chunks[i].Mtime == chunks[j].Mtime { + filer_pb.EnsureFid(chunks[i]) + filer_pb.EnsureFid(chunks[j]) + if chunks[i].Fid == nil || chunks[j].Fid == nil { + return true + } + return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey + } + return chunks[i].Mtime < chunks[j].Mtime // keep this to make tests run + }) + + for _, chunk := range chunks { + + // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size)) + visibles = MergeIntoVisibles(visibles, chunk) + + logPrintf("add", visibles) + + } + + return +} + +// find non-overlapping visible intervals +// visible interval map to one file chunk + +type VisibleInterval struct { + start int64 + stop int64 + modifiedTime int64 + fileId string + chunkOffset int64 + chunkSize uint64 + cipherKey []byte + isGzipped bool +} + +func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval { + return VisibleInterval{ + start: start, + stop: stop, + fileId: fileId, + modifiedTime: modifiedTime, + chunkOffset: chunkOffset, // the starting position in the chunk + chunkSize: chunkSize, + cipherKey: cipherKey, + isGzipped: isGzipped, + } +} + +func min(x, y int64) int64 { + if x <= y { + return x + } + return y +} +func max(x, y int64) int64 { + if x <= y { + return y + } + return x +} diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go new file mode 100644 index 000000000..9f9566d9b --- /dev/null +++ b/weed/filer/filechunks2_test.go @@ -0,0 
+1,46 @@ +package filer + +import ( + "sort" + "testing" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func TestCompactFileChunksRealCase(t *testing.T) { + + chunks := []*filer_pb.FileChunk{ + {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497}, + {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492}, + {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928}, + {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894}, + {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900}, + {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904}, + {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910}, + {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903}, + {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911}, + {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909}, + {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922}, + } + + printChunks("before", chunks) + + compacted, garbage := CompactFileChunks(nil, chunks) + + printChunks("compacted", compacted) + printChunks("garbage", garbage) + +} + +func printChunks(name string, chunks []*filer_pb.FileChunk) { + sort.Slice(chunks, func(i, j int) bool { + if chunks[i].Offset == chunks[j].Offset { + return chunks[i].Mtime < chunks[j].Mtime + } + return chunks[i].Offset < chunks[j].Offset + }) + for _, chunk := range chunks { + glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) + } +} diff --git a/weed/filer2/filechunks_test.go b/weed/filer/filechunks_test.go index e75e60753..699e7e298 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer/filechunks_test.go @@ -1,10 +1,15 @@ -package filer2 +package filer import ( + "fmt" "log" + "math" + "math/rand" + "strconv" "testing" - "fmt" + "github.com/stretchr/testify/assert" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) @@ -16,7 +21,7 @@ func TestCompactFileChunks(t *testing.T) { {Offset: 110, Size: 200, FileId: "jkl", Mtime: 300}, } - compacted, garbage := CompactFileChunks(chunks) + compacted, garbage := CompactFileChunks(nil, chunks) if len(compacted) != 3 { t.Fatalf("unexpected compacted: %d", len(compacted)) @@ -49,7 +54,7 @@ func TestCompactFileChunks2(t *testing.T) { }) } - compacted, garbage := CompactFileChunks(chunks) + compacted, garbage := CompactFileChunks(nil, chunks) if len(compacted) != 4 { t.Fatalf("unexpected compacted: %d", len(compacted)) @@ -59,6 +64,42 @@ func TestCompactFileChunks2(t *testing.T) { } } +func TestRandomFileChunksCompact(t *testing.T) { + + data := make([]byte, 1024) + + var chunks []*filer_pb.FileChunk + for i := 0; i < 15; i++ { + start, stop := rand.Intn(len(data)), rand.Intn(len(data)) + if start > stop { + start, stop = stop, start + } + if start+16 < stop { + stop = start + 16 + } + chunk := &filer_pb.FileChunk{ + FileId: strconv.Itoa(i), + Offset: int64(start), + Size: uint64(stop - start), + Mtime: int64(i), + Fid: &filer_pb.FileId{FileKey: uint64(i)}, + } + chunks = append(chunks, chunk) + for x := start; x < stop; x++ { + data[x] = byte(i) + } + } + + visibles, _ := NonOverlappingVisibleIntervals(nil, chunks) + + for _, v := range visibles { + for x := v.start; x < v.stop; x++ { + assert.Equal(t, 
strconv.Itoa(int(data[x])), v.fileId) + } + } + +} + func TestIntervalMerging(t *testing.T) { testcases := []struct { @@ -91,12 +132,12 @@ func TestIntervalMerging(t *testing.T) { // case 2: updates overwrite part of previous chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 50, FileId: "asdf", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 0, Size: 70, FileId: "b", Mtime: 134}, }, Expected: []*VisibleInterval{ - {start: 0, stop: 50, fileId: "asdf"}, - {start: 50, stop: 100, fileId: "abc"}, + {start: 0, stop: 70, fileId: "b"}, + {start: 70, stop: 100, fileId: "a", chunkOffset: 70}, }, }, // case 3: updates overwrite full chunks @@ -126,25 +167,25 @@ func TestIntervalMerging(t *testing.T) { // case 5: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 184}, - {Offset: 70, Size: 150, FileId: "abc", Mtime: 143}, - {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "d", Mtime: 184}, + {Offset: 70, Size: 150, FileId: "c", Mtime: 143}, + {Offset: 80, Size: 100, FileId: "b", Mtime: 134}, }, Expected: []*VisibleInterval{ - {start: 0, stop: 200, fileId: "asdf"}, - {start: 200, stop: 220, fileId: "abc"}, + {start: 0, stop: 200, fileId: "d"}, + {start: 200, stop: 220, fileId: "c", chunkOffset: 130}, }, }, // case 6: same updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123}, }, Expected: []*VisibleInterval{ - {start: 0, stop: 100, fileId: "abc"}, + {start: 0, stop: 100, fileId: "xyz"}, }, }, // case 7: real updates @@ -186,7 +227,7 @@ func TestIntervalMerging(t *testing.T) { for i, testcase := range testcases { log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i) - intervals := NonOverlappingVisibleIntervals(testcase.Chunks) + intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks) for x, interval := range intervals { log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s", i, x, interval.start, interval.stop, interval.fileId) @@ -204,6 +245,10 @@ func TestIntervalMerging(t *testing.T) { t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s", i, x, interval.fileId, testcase.Expected[x].fileId) } + if interval.chunkOffset != testcase.Expected[x].chunkOffset { + t.Fatalf("failed on test case %d, interval %d, chunkOffset %d, expect %d", + i, x, interval.chunkOffset, testcase.Expected[x].chunkOffset) + } } if len(intervals) != len(testcase.Expected) { t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected)) @@ -218,7 +263,7 @@ func TestChunksReading(t *testing.T) { testcases := []struct { Chunks []*filer_pb.FileChunk Offset int64 - Size int + Size int64 Expected []*ChunkView }{ // case 0: normal @@ -251,14 +296,14 @@ func TestChunksReading(t *testing.T) { // case 2: updates overwrite part of previous chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 50, 
FileId: "asdf", Mtime: 134}, + {Offset: 3, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 10, Size: 50, FileId: "b", Mtime: 134}, }, - Offset: 25, - Size: 50, + Offset: 30, + Size: 40, Expected: []*ChunkView{ - {Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25}, - {Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50}, + {Offset: 20, Size: 30, FileId: "b", LogicOffset: 30}, + {Offset: 57, Size: 10, FileId: "a", LogicOffset: 60}, }, }, // case 3: updates overwrite full chunks @@ -286,35 +331,35 @@ func TestChunksReading(t *testing.T) { Size: 400, Expected: []*ChunkView{ {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0}, - // {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen + {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250}, }, }, // case 5: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 184}, - {Offset: 70, Size: 150, FileId: "abc", Mtime: 143}, + {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "c", Mtime: 184}, + {Offset: 70, Size: 150, FileId: "b", Mtime: 143}, {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134}, }, Offset: 0, Size: 220, Expected: []*ChunkView{ - {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0}, - {Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200}, + {Offset: 0, Size: 200, FileId: "c", LogicOffset: 0}, + {Offset: 130, Size: 20, FileId: "b", LogicOffset: 200}, }, }, // case 6: same updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123}, }, Offset: 0, Size: 100, Expected: []*ChunkView{ - {Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "xyz", LogicOffset: 0}, }, }, // case 7: edge cases @@ -331,21 +376,60 @@ func TestChunksReading(t *testing.T) { {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, }, }, + // case 8: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, + }, + Offset: 0, + Size: 300, + Expected: []*ChunkView{ + {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90}, + {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, + }, + }, + // case 9: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + }, + Offset: 0, + Size: 153578836, + Expected: []*ChunkView{ + {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, + {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 
43175936}, + {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, + {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, + {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + }, + }, } for i, testcase := range testcases { + if i != 2 { + // continue + } log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i) - chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size) + chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size) for x, chunk := range chunks { log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s", i, x, chunk.Offset, chunk.Size, chunk.FileId) if chunk.Offset != testcase.Expected[x].Offset { - t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d", - i, x, chunk.Offset, testcase.Expected[x].Offset) + t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d", + i, chunk.FileId, chunk.Offset, testcase.Expected[x].Offset) } if chunk.Size != testcase.Expected[x].Size { - t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d", - i, x, chunk.Size, testcase.Expected[x].Size) + t.Fatalf("failed on read case %d, chunk %s, Size %d, expect %d", + i, chunk.FileId, chunk.Size, testcase.Expected[x].Size) } if chunk.FileId != testcase.Expected[x].FileId { t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s", @@ -379,6 +463,77 @@ func BenchmarkCompactFileChunks(b *testing.B) { } for n := 0; n < b.N; n++ { - CompactFileChunks(chunks) + CompactFileChunks(nil, chunks) + } +} + +func TestViewFromVisibleIntervals(t *testing.T) { + visibles := []VisibleInterval{ + { + start: 0, + stop: 25, + fileId: "fid1", + }, + { + start: 4096, + stop: 8192, + fileId: "fid2", + }, + { + start: 16384, + stop: 18551, + fileId: "fid3", + }, } + + views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) + + if len(views) != len(visibles) { + assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") + } + +} + +func TestViewFromVisibleIntervals2(t *testing.T) { + visibles := []VisibleInterval{ + { + start: 344064, + stop: 348160, + fileId: "fid1", + }, + { + start: 348160, + stop: 356352, + fileId: "fid2", + }, + } + + views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) + + if len(views) != len(visibles) { + assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") + } + +} + +func TestViewFromVisibleIntervals3(t *testing.T) { + visibles := []VisibleInterval{ + { + start: 1000, + stop: 2000, + fileId: "fid1", + }, + { + start: 3000, + stop: 4000, + fileId: "fid2", + }, + } + + views := ViewFromVisibleIntervals(visibles, 1700, 1500) + + if len(views) != len(visibles) { + assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") + } + } diff --git a/weed/filer/filer.go b/weed/filer/filer.go new file mode 100644 index 000000000..effdc0e4e --- /dev/null +++ b/weed/filer/filer.go @@ -0,0 +1,304 @@ +package filer + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +const ( + LogFlushInterval = time.Minute + PaginationSize = 1024 + FilerStoreId = "filer.store.id" 
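// Editor's note: FilerStoreId keys a random 32-bit signature persisted in
// the store (see setOrLoadFilerStoreSignature below); the signature travels
// in event Signatures so a filer can recognize metadata events it has
// already seen and avoid applying replicated updates twice.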
+) + +var ( + OS_UID = uint32(os.Getuid()) + OS_GID = uint32(os.Getgid()) +) + +type Filer struct { + Store VirtualFilerStore + MasterClient *wdclient.MasterClient + fileIdDeletionQueue *util.UnboundedQueue + GrpcDialOption grpc.DialOption + DirBucketsPath string + FsyncBuckets []string + buckets *FilerBuckets + Cipher bool + LocalMetaLogBuffer *log_buffer.LogBuffer + metaLogCollection string + metaLogReplication string + MetaAggregator *MetaAggregator + Signature int32 + FilerConf *FilerConf +} + +func NewFiler(masters []string, grpcDialOption grpc.DialOption, + filerHost string, filerGrpcPort uint32, collection string, replication string, dataCenter string, notifyFn func()) *Filer { + f := &Filer{ + MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, dataCenter, masters), + fileIdDeletionQueue: util.NewUnboundedQueue(), + GrpcDialOption: grpcDialOption, + FilerConf: NewFilerConf(), + } + f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn) + f.metaLogCollection = collection + f.metaLogReplication = replication + + go f.loopProcessingDeletion() + + return f +} + +func (f *Filer) AggregateFromPeers(self string, filers []string) { + + // set peers + found := false + for _, peer := range filers { + if peer == self { + found = true + } + } + if !found { + filers = append(filers, self) + } + + f.MetaAggregator = NewMetaAggregator(filers, f.GrpcDialOption) + f.MetaAggregator.StartLoopSubscribe(f, self) + +} + +func (f *Filer) SetStore(store FilerStore) { + f.Store = NewFilerStoreWrapper(store) + + f.setOrLoadFilerStoreSignature(store) + +} + +func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) { + storeIdBytes, err := store.KvGet(context.Background(), []byte(FilerStoreId)) + if err == ErrKvNotFound || err == nil && len(storeIdBytes) == 0 { + f.Signature = util.RandomInt32() + storeIdBytes = make([]byte, 4) + util.Uint32toBytes(storeIdBytes, uint32(f.Signature)) + if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil { + glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err) + } + glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature) + } else if err == nil && len(storeIdBytes) == 4 { + f.Signature = int32(util.BytesToUint32(storeIdBytes)) + glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature) + } else { + glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err) + } +} + +func (f *Filer) GetStore() (store FilerStore) { + return f.Store +} + +func (fs *Filer) GetMaster() string { + return fs.MasterClient.GetMaster() +} + +func (fs *Filer) KeepConnectedToMaster() { + fs.MasterClient.KeepConnectedToMaster() +} + +func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { + return f.Store.BeginTransaction(ctx) +} + +func (f *Filer) CommitTransaction(ctx context.Context) error { + return f.Store.CommitTransaction(ctx) +} + +func (f *Filer) RollbackTransaction(ctx context.Context) error { + return f.Store.RollbackTransaction(ctx) +} + +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32) error { + + if string(entry.FullPath) == "/" { + return nil + } + + oldEntry, _ := f.FindEntry(ctx, entry.FullPath) + + /* + if !hasWritePermission(lastDirectoryEntry, entry) { + glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d", + lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid) + return fmt.Errorf("no write permission in folder %v", 
lastDirectoryEntry.FullPath) + } + */ + + if oldEntry == nil { + + dirParts := strings.Split(string(entry.FullPath), "/") + if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil { + return err + } + + glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) + if err := f.Store.InsertEntry(ctx, entry); err != nil { + glog.Errorf("insert entry %s: %v", entry.FullPath, err) + return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) + } + } else { + if o_excl { + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) + } + glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name()) + if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { + glog.Errorf("update entry %s: %v", entry.FullPath, err) + return fmt.Errorf("update entry %s: %v", entry.FullPath, err) + } + } + + f.maybeAddBucket(entry) + f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures) + + f.deleteChunksIfNotNew(oldEntry, entry) + + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + + return nil +} + +func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) { + + if level == 0 { + return nil + } + + dirPath := "/" + util.Join(dirParts[:level]...) + // fmt.Printf("%d directory: %+v\n", i, dirPath) + + // check the store directly + glog.V(4).Infof("find uncached directory: %s", dirPath) + dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath)) + + // no such existing directory + if dirEntry == nil { + + // ensure parent directory + if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil { + return err + } + + // create the directory + now := time.Now() + + dirEntry = &Entry{ + FullPath: util.FullPath(dirPath), + Attr: Attr{ + Mtime: now, + Crtime: now, + Mode: os.ModeDir | entry.Mode | 0110, + Uid: entry.Uid, + Gid: entry.Gid, + Collection: entry.Collection, + Replication: entry.Replication, + UserName: entry.UserName, + GroupNames: entry.GroupNames, + }, + } + + glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) + mkdirErr := f.Store.InsertEntry(ctx, dirEntry) + if mkdirErr != nil { + if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) + return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) + } + } else { + f.maybeAddBucket(dirEntry) + f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil) + } + + } else if !dirEntry.IsDirectory() { + glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) + return fmt.Errorf("%s is a file", dirPath) + } + + return nil +} + +func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { + if oldEntry != nil { + entry.Attr.Crtime = oldEntry.Attr.Crtime + if oldEntry.IsDirectory() && !entry.IsDirectory() { + glog.Errorf("existing %s is a directory", oldEntry.FullPath) + return fmt.Errorf("existing %s is a directory", oldEntry.FullPath) + } + if !oldEntry.IsDirectory() && entry.IsDirectory() { + glog.Errorf("existing %s is a file", oldEntry.FullPath) + return fmt.Errorf("existing %s is a file", oldEntry.FullPath) + } + } + return f.Store.UpdateEntry(ctx, entry) +} + +var ( + Root = &Entry{ + FullPath: "/", + Attr: Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: os.ModeDir | 
0755, + Uid: OS_UID, + Gid: OS_GID, + }, + } +) + +func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) { + + if string(p) == "/" { + return Root, nil + } + entry, err = f.Store.FindEntry(ctx, p) + if entry != nil && entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.Store.DeleteOneEntry(ctx, entry) + return nil, filer_pb.ErrNotFound + } + } + return + +} + +func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) { + lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool { + if entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.Store.DeleteOneEntry(ctx, entry) + expiredCount++ + return true + } + } + return eachEntryFunc(entry) + }) + if err != nil { + return expiredCount, lastFileName, err + } + return +} + +func (f *Filer) Shutdown() { + f.LocalMetaLogBuffer.Shutdown() + f.Store.Shutdown() +} diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go new file mode 100644 index 000000000..43fb000c9 --- /dev/null +++ b/weed/filer/filer_buckets.go @@ -0,0 +1,121 @@ +package filer + +import ( + "context" + "math" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type BucketName string +type BucketOption struct { + Name BucketName + Replication string + fsync bool +} +type FilerBuckets struct { + dirBucketsPath string + buckets map[BucketName]*BucketOption + sync.RWMutex +} + +func (f *Filer) LoadBuckets() { + + f.buckets = &FilerBuckets{ + buckets: make(map[BucketName]*BucketOption), + } + + limit := int64(math.MaxInt32) + + entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "") + + if err != nil { + glog.V(1).Infof("no buckets found: %v", err) + return + } + + shouldFsyncMap := make(map[string]bool) + for _, bucket := range f.FsyncBuckets { + shouldFsyncMap[bucket] = true + } + + glog.V(1).Infof("buckets found: %d", len(entries)) + + f.buckets.Lock() + for _, entry := range entries { + _, shouldFsnyc := shouldFsyncMap[entry.Name()] + f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{ + Name: BucketName(entry.Name()), + Replication: entry.Replication, + fsync: shouldFsnyc, + } + } + f.buckets.Unlock() + +} + +func (f *Filer) ReadBucketOption(buketName string) (replication string, fsync bool) { + + f.buckets.RLock() + defer f.buckets.RUnlock() + + option, found := f.buckets.buckets[BucketName(buketName)] + + if !found { + return "", false + } + return option.Replication, option.fsync + +} + +func (f *Filer) isBucket(entry *Entry) bool { + if !entry.IsDirectory() { + return false + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return false + } + + f.buckets.RLock() + defer f.buckets.RUnlock() + + _, found := f.buckets.buckets[BucketName(dirName)] + + return found + +} + +func (f *Filer) maybeAddBucket(entry *Entry) { + if !entry.IsDirectory() { + return + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return + } + f.addBucket(dirName, &BucketOption{ + Name: BucketName(dirName), + Replication: entry.Replication, + }) +} + +func (f *Filer) addBucket(buketName string, 
bucketOption *BucketOption) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + f.buckets.buckets[BucketName(buketName)] = bucketOption + +} + +func (f *Filer) deleteBucket(buketName string) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + delete(f.buckets.buckets, BucketName(buketName)) + +} diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go new file mode 100644 index 000000000..ab5afc5cc --- /dev/null +++ b/weed/filer/filer_conf.go @@ -0,0 +1,149 @@ +package filer + +import ( + "bytes" + "context" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/jsonpb" + "github.com/viant/ptrie" +) + +const ( + DirectoryEtcRoot = "/etc" + DirectoryEtcSeaweedFS = "/etc/seaweedfs" + FilerConfName = "filer.conf" + IamConfigDirecotry = "/etc/iam" + IamIdentityFile = "identity.json" + IamPoliciesFile = "policies.json" +) + +type FilerConf struct { + rules ptrie.Trie +} + +func NewFilerConf() (fc *FilerConf) { + fc = &FilerConf{ + rules: ptrie.New(), + } + return fc +} + +func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) { + filerConfPath := util.NewFullPath(DirectoryEtcSeaweedFS, FilerConfName) + entry, err := filer.FindEntry(context.Background(), filerConfPath) + if err != nil { + if err == filer_pb.ErrNotFound { + return nil + } + glog.Errorf("read filer conf entry %s: %v", filerConfPath, err) + return + } + + if len(entry.Content) > 0 { + return fc.LoadFromBytes(entry.Content) + } + + return fc.loadFromChunks(filer, entry.Content, entry.Chunks) +} + +func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk) (err error) { + if len(content) == 0 { + content, err = filer.readEntry(chunks) + if err != nil { + glog.Errorf("read filer conf content: %v", err) + return + } + } + + return fc.LoadFromBytes(content) +} + +func (fc *FilerConf) LoadFromBytes(data []byte) (err error) { + conf := &filer_pb.FilerConf{} + + if err := jsonpb.Unmarshal(bytes.NewReader(data), conf); err != nil { + return err + } + + return fc.doLoadConf(conf) +} + +func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) { + for _, location := range conf.Locations { + err = fc.AddLocationConf(location) + if err != nil { + // this is not recoverable + return nil + } + } + return nil +} + +func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) { + err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) + if err != nil { + glog.Errorf("put location prefix: %v", err) + } + return +} + +func (fc *FilerConf) DeleteLocationConf(locationPrefix string) { + rules := ptrie.New() + fc.rules.Walk(func(key []byte, value interface{}) bool { + if string(key) == locationPrefix { + return true + } + rules.Put(key, value) + return true + }) + fc.rules = rules + return +} + +func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) { + pathConf = &filer_pb.FilerConf_PathConf{} + fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool { + t := value.(*filer_pb.FilerConf_PathConf) + mergePathConf(pathConf, t) + return true + }) + return pathConf +} + +// merge if values in b is not empty, merge them into a +func mergePathConf(a, b *filer_pb.FilerConf_PathConf) { + a.Collection = util.Nvl(b.Collection, a.Collection) + a.Replication = util.Nvl(b.Replication, a.Replication) + a.Ttl = util.Nvl(b.Ttl, a.Ttl) + if b.DiskType != "" { + a.DiskType = b.DiskType + } + 
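// Editor's note: merge rule — a non-empty value in b (the rule being folded
// in) wins over a; booleans are OR-ed and positive counters taken. As
// MatchStorageRule walks matching prefixes, this appears to leave the most
// specific path rule in effect.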
a.Fsync = b.Fsync || a.Fsync + if b.VolumeGrowthCount > 0 { + a.VolumeGrowthCount = b.VolumeGrowthCount + } +} + +func (fc *FilerConf) ToProto() *filer_pb.FilerConf { + m := &filer_pb.FilerConf{} + fc.rules.Walk(func(key []byte, value interface{}) bool { + pathConf := value.(*filer_pb.FilerConf_PathConf) + m.Locations = append(m.Locations, pathConf) + return true + }) + return m +} + +func (fc *FilerConf) ToText(writer io.Writer) error { + + m := jsonpb.Marshaler{ + EmitDefaults: false, + Indent: " ", + } + + return m.Marshal(writer, fc.ToProto()) +} diff --git a/weed/filer/filer_conf_test.go b/weed/filer/filer_conf_test.go new file mode 100644 index 000000000..ff868a3ec --- /dev/null +++ b/weed/filer/filer_conf_test.go @@ -0,0 +1,34 @@ +package filer + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/stretchr/testify/assert" +) + +func TestFilerConf(t *testing.T) { + + fc := NewFilerConf() + + conf := &filer_pb.FilerConf{Locations: []*filer_pb.FilerConf_PathConf{ + { + LocationPrefix: "/buckets/abc", + Collection: "abc", + }, + { + LocationPrefix: "/buckets/abcd", + Collection: "abcd", + }, + { + LocationPrefix: "/buckets/", + Replication: "001", + }, + }} + fc.doLoadConf(conf) + + assert.Equal(t, "abc", fc.MatchStorageRule("/buckets/abc/jasdf").Collection) + assert.Equal(t, "abcd", fc.MatchStorageRule("/buckets/abcd/jasdf").Collection) + assert.Equal(t, "001", fc.MatchStorageRule("/buckets/abc/jasdf").Replication) + +} diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go new file mode 100644 index 000000000..3ef3cfff9 --- /dev/null +++ b/weed/filer/filer_delete_entry.go @@ -0,0 +1,161 @@ +package filer + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type HardLinkId []byte + +const ( + MsgFailDelNonEmptyFolder = "fail to delete non-empty folder" +) + +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) { + if p == "/" { + return nil + } + + entry, findErr := f.FindEntry(ctx, p) + if findErr != nil { + return findErr + } + + isDeleteCollection := f.isBucket(entry) + + var chunks []*filer_pb.FileChunk + var hardLinkIds []HardLinkId + chunks = append(chunks, entry.Chunks...) + if entry.IsDirectory() { + // delete the folder children, not including the folder itself + var dirChunks []*filer_pb.FileChunk + var dirHardLinkIds []HardLinkId + dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures) + if err != nil { + glog.V(0).Infof("delete directory %s: %v", p, err) + return fmt.Errorf("delete directory %s: %v", p, err) + } + chunks = append(chunks, dirChunks...) + hardLinkIds = append(hardLinkIds, dirHardLinkIds...) + } + + // delete the file or folder + err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster, signatures) + if err != nil { + return fmt.Errorf("delete file %s: %v", p, err) + } + + if shouldDeleteChunks && !isDeleteCollection { + f.DirectDeleteChunks(chunks) + } + // A case not handled: + // what if the chunk is in a different collection? 
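// Editor's note: hard-linked data is deliberately excluded from the direct
// chunk purge above. Several entries can share one HardLinkId, so those
// chunks are released via maybeDeleteHardLinks below, where the store can
// decrement HardLinkCounter and drop data only when no references remain.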
+ if shouldDeleteChunks { + f.maybeDeleteHardLinks(hardLinkIds) + } + + if isDeleteCollection { + collectionName := entry.Name() + f.doDeleteCollection(collectionName) + f.deleteBucket(collectionName) + } + + return nil +} + +func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) { + + lastFileName := "" + includeLastFile := false + if !isDeletingBucket { + for { + entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "") + if err != nil { + glog.Errorf("list folder %s: %v", entry.FullPath, err) + return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) + } + if lastFileName == "" && !isRecursive && len(entries) > 0 { + // only for first iteration in the loop + glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) + return nil, nil, fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath) + } + + for _, sub := range entries { + lastFileName = sub.Name() + var dirChunks []*filer_pb.FileChunk + var dirHardLinkIds []HardLinkId + if sub.IsDirectory() { + subIsDeletingBucket := f.isBucket(sub) + dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil) + chunks = append(chunks, dirChunks...) + hardlinkIds = append(hardlinkIds, dirHardLinkIds...) + } else { + f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil) + if len(sub.HardLinkId) != 0 { + // hard link chunk data are deleted separately + hardlinkIds = append(hardlinkIds, sub.HardLinkId) + } else { + chunks = append(chunks, sub.Chunks...) 
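// Editor's note: children are scanned PaginationSize (1024) entries per
// round, with lastFileName as the cursor for the next listing; directories
// recurse first, so the accumulated chunks and hardlinkIds cover the whole
// subtree before the folder's own records go via DeleteFolderChildren.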
+ } + } + if err != nil && !ignoreRecursiveError { + return nil, nil, err + } + } + + if len(entries) < PaginationSize { + break + } + } + } + + glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks) + + if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { + return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + + f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures) + + return chunks, hardlinkIds, nil +} + +func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) { + + glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) + + if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil { + return fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + if !entry.IsDirectory() { + f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures) + } + + return nil +} + +func (f *Filer) doDeleteCollection(collectionName string) (err error) { + + return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: collectionName, + }) + if err != nil { + glog.Infof("delete collection %s: %v", collectionName, err) + } + return err + }) + +} + +func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) { + for _, hardLinkId := range hardLinkIds { + if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil { + glog.Errorf("delete hard link id %d : %v", hardLinkId, err) + } + } +} diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go new file mode 100644 index 000000000..9eee38277 --- /dev/null +++ b/weed/filer/filer_deletion.go @@ -0,0 +1,153 @@ +package filer + +import ( + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) { + return func(vids []string) (map[string]operation.LookupResult, error) { + m := make(map[string]operation.LookupResult) + for _, vid := range vids { + locs, _ := masterClient.GetVidLocations(vid) + var locations []operation.Location + for _, loc := range locs { + locations = append(locations, operation.Location{ + Url: loc.Url, + PublicUrl: loc.PublicUrl, + }) + } + m[vid] = operation.LookupResult{ + VolumeId: vid, + Locations: locations, + } + } + return m, nil + } +} + +func (f *Filer) loopProcessingDeletion() { + + lookupFunc := LookupByMasterClientFn(f.MasterClient) + + DeletionBatchSize := 100000 // roughly 20 bytes cost per file id. 
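// Editor's note: at the quoted ~20 bytes per file id, a full batch is about
// 2 MB per DeleteFilesWithLookupVolumeId call; the consumer below slices the
// queued ids into such batches and sleeps ~1.1s when the queue is idle.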
+
+	var deletionCount int
+	for {
+		deletionCount = 0
+		f.fileIdDeletionQueue.Consume(func(fileIds []string) {
+			for len(fileIds) > 0 {
+				var toDeleteFileIds []string
+				if len(fileIds) > DeletionBatchSize {
+					toDeleteFileIds = fileIds[:DeletionBatchSize]
+					fileIds = fileIds[DeletionBatchSize:]
+				} else {
+					toDeleteFileIds = fileIds
+					fileIds = fileIds[:0]
+				}
+				deletionCount = len(toDeleteFileIds)
+				_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+				if err != nil {
+					if !strings.Contains(err.Error(), "already deleted") {
+						glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+					}
+				} else {
+					glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+				}
+			}
+		})
+
+		if deletionCount == 0 {
+			time.Sleep(1123 * time.Millisecond)
+		}
+	}
+}
+
+func (f *Filer) doDeleteFileIds(fileIds []string) {
+
+	lookupFunc := LookupByMasterClientFn(f.MasterClient)
+	DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+	for len(fileIds) > 0 {
+		var toDeleteFileIds []string
+		if len(fileIds) > DeletionBatchSize {
+			toDeleteFileIds = fileIds[:DeletionBatchSize]
+			fileIds = fileIds[DeletionBatchSize:]
+		} else {
+			toDeleteFileIds = fileIds
+			fileIds = fileIds[:0]
+		}
+		deletionCount := len(toDeleteFileIds)
+		_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+		if err != nil {
+			if !strings.Contains(err.Error(), "already deleted") {
+				glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+			}
+		}
+	}
+}
+
+func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
+	var fileIdsToDelete []string
+	for _, chunk := range chunks {
+		if !chunk.IsChunkManifest {
+			fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
+			continue
+		}
+		dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+		if manifestResolveErr != nil {
+			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+		}
+		for _, dChunk := range dataChunks {
+			fileIdsToDelete = append(fileIdsToDelete, dChunk.GetFileIdString())
+		}
+		fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
+	}
+
+	f.doDeleteFileIds(fileIdsToDelete)
+}
+
+func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
+	for _, chunk := range chunks {
+		if !chunk.IsChunkManifest {
+			f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
+			continue
+		}
+		dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+		if manifestResolveErr != nil {
+			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+		}
+		for _, dChunk := range dataChunks {
+			f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
+		}
+		f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
+	}
+}
+
+func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
+
+	if oldEntry == nil {
+		return
+	}
+	if newEntry == nil {
+		f.DeleteChunks(oldEntry.Chunks)
+		return
+	}
+
+	var toDelete []*filer_pb.FileChunk
+	newChunkIds := make(map[string]bool)
+	for _, newChunk := range newEntry.Chunks {
+		newChunkIds[newChunk.GetFileIdString()] = true
+	}
+
+	for _, oldChunk := range oldEntry.Chunks {
+		if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
+			toDelete = append(toDelete, oldChunk)
+		}
+	}
+	f.DeleteChunks(toDelete)
+}
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
new file mode 100644
index 000000000..7ab101102
--- /dev/null
+++ b/weed/filer/filer_notify.go
@@ -0,0 +1,185 @@
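// A hypothetical caller, to contrast the two deletion paths defined above
// (illustrative sketch only):
//
//	f.DeleteChunks(entry.Chunks)       // enqueues ids; loopProcessingDeletion
//	                                   // batches the actual deletes later
//	f.DirectDeleteChunks(entry.Chunks) // resolves manifests and deletes on the
//	                                   // volume servers before returning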
+package filer + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) { + var fullpath string + if oldEntry != nil { + fullpath = string(oldEntry.FullPath) + } else if newEntry != nil { + fullpath = string(newEntry.FullPath) + } else { + return + } + + // println("fullpath:", fullpath) + + if strings.HasPrefix(fullpath, SystemLogDir) { + return + } + foundSelf := false + for _, sig := range signatures { + if sig == f.Signature { + foundSelf = true + } + } + if !foundSelf { + signatures = append(signatures, f.Signature) + } + + newParentPath := "" + if newEntry != nil { + newParentPath, _ = newEntry.FullPath.DirAndName() + } + eventNotification := &filer_pb.EventNotification{ + OldEntry: oldEntry.ToProtoEntry(), + NewEntry: newEntry.ToProtoEntry(), + DeleteChunks: deleteChunks, + NewParentPath: newParentPath, + IsFromOtherCluster: isFromOtherCluster, + Signatures: signatures, + } + + if notification.Queue != nil { + glog.V(3).Infof("notifying entry update %v", fullpath) + if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil { + // throw message + glog.Error(err) + } + } + + f.logMetaEvent(ctx, fullpath, eventNotification) + +} + +func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) { + + dir, _ := util.FullPath(fullpath).DirAndName() + + event := &filer_pb.SubscribeMetadataResponse{ + Directory: dir, + EventNotification: eventNotification, + TsNs: time.Now().UnixNano(), + } + data, err := proto.Marshal(event) + if err != nil { + glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + return + } + + f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs) + +} + +func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) { + + if len(buf) == 0 { + return + } + + startTime, stopTime = startTime.UTC(), stopTime.UTC() + + targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), + // startTime.Second(), startTime.Nanosecond(), + ) + + for { + if err := f.appendToFile(targetFile, buf); err != nil { + glog.V(1).Infof("log write failed %s: %v", targetFile, err) + time.Sleep(737 * time.Millisecond) + } else { + break + } + } +} + +func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) { + + startTime = startTime.UTC() + startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute()) + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "") + if listDayErr != nil { + return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr) + } + for _, dayEntry := range dayEntries { + // println("checking day", dayEntry.FullPath) + hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), 
util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "") + if listHourMinuteErr != nil { + return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr) + } + for _, hourMinuteEntry := range hourMinuteEntries { + // println("checking hh-mm", hourMinuteEntry.FullPath) + if dayEntry.Name() == startDate { + if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 { + continue + } + } + // println("processing", hourMinuteEntry.FullPath) + chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks) + if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + continue + } + return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err) + } + chunkedFileReader.Close() + } + } + + return lastTsNs, nil +} + +func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) { + for { + n, err := r.Read(sizeBuf) + if err != nil { + return lastTsNs, err + } + if n != 4 { + return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n) + } + size := util.BytesToUint32(sizeBuf) + // println("entry size", size) + entryData := make([]byte, size) + n, err = r.Read(entryData) + if err != nil { + return lastTsNs, err + } + if n != int(size) { + return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size) + } + logEntry := &filer_pb.LogEntry{} + if err = proto.Unmarshal(entryData, logEntry); err != nil { + return lastTsNs, err + } + if logEntry.TsNs <= ns { + continue + } + // println("each log: ", logEntry.TsNs) + if err := eachLogEntryFn(logEntry); err != nil { + return lastTsNs, err + } else { + lastTsNs = logEntry.TsNs + } + } +} diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go new file mode 100644 index 000000000..d441bbbc9 --- /dev/null +++ b/weed/filer/filer_notify_append.go @@ -0,0 +1,75 @@ +package filer + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (f *Filer) appendToFile(targetFile string, data []byte) error { + + assignResult, uploadResult, err2 := f.assignAndUpload(targetFile, data) + if err2 != nil { + return err2 + } + + // find out existing entry + fullpath := util.FullPath(targetFile) + entry, err := f.FindEntry(context.Background(), fullpath) + var offset int64 = 0 + if err == filer_pb.ErrNotFound { + entry = &Entry{ + FullPath: fullpath, + Attr: Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + Uid: OS_UID, + Gid: OS_GID, + }, + } + } else { + offset = int64(TotalSize(entry.Chunks)) + } + + // append to existing chunks + entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset)) + + // update the entry + err = f.CreateEntry(context.Background(), entry, false, false, nil) + + return err +} + +func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.AssignResult, *operation.UploadResult, error) { + // assign a volume location + rule := f.FilerConf.MatchStorageRule(targetFile) + assignRequest := &operation.VolumeAssignRequest{ + Count: 1, + Collection: util.Nvl(f.metaLogCollection, rule.Collection), + Replication: util.Nvl(f.metaLogReplication, rule.Replication), + WritableVolumeCount: rule.VolumeGrowthCount, + } + + 
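	// util.Nvl returns its first non-empty argument, so the filer-wide
	// metaLogCollection/metaLogReplication settings take precedence over the
	// per-path rule matched from filer.conf above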
assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
+	if err != nil {
+		return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+	}
+	if assignResult.Error != "" {
+		return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
+	}
+
+	// upload data
+	targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+	uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
+	if err != nil {
+		return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+	}
+	// println("uploaded to", targetUrl)
+	return assignResult, uploadResult, nil
+}
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer/filer_notify_test.go
index b74e2ad35..6a2be8f18 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer/filer_notify_test.go
@@ -1,17 +1,19 @@
-package filer2
+package filer
 
 import (
 	"testing"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/util"
+
 	"github.com/golang/protobuf/proto"
 )
 
 func TestProtoMarshalText(t *testing.T) {
 
 	oldEntry := &Entry{
-		FullPath: FullPath("/this/path/to"),
+		FullPath: util.FullPath("/this/path/to"),
 		Attr: Attr{
 			Mtime: time.Now(),
 			Mode:  0644,
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
new file mode 100644
index 000000000..a91faeb24
--- /dev/null
+++ b/weed/filer/filer_on_meta_event.go
@@ -0,0 +1,82 @@
+package filer
+
+import (
+	"bytes"
+	"math"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// onMetadataChangeEvent is triggered after the filer has processed change events from local or remote filers
+func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
+	f.maybeReloadFilerConfiguration(event)
+	f.onBucketEvents(event)
+}
+
+func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) {
+	message := event.EventNotification
+	for _, sig := range message.Signatures {
+		if sig == f.Signature {
+			return
+		}
+	}
+	if f.DirBucketsPath == event.Directory {
+		if message.OldEntry == nil && message.NewEntry != nil {
+			f.Store.OnBucketCreation(message.NewEntry.Name)
+		}
+		if message.OldEntry != nil && message.NewEntry == nil {
+			f.Store.OnBucketDeletion(message.OldEntry.Name)
+		}
+	}
+}
+
+func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataResponse) {
+	if DirectoryEtcSeaweedFS != event.Directory {
+		if DirectoryEtcSeaweedFS != event.EventNotification.NewParentPath {
+			return
+		}
+	}
+
+	entry := event.EventNotification.NewEntry
+	if entry == nil {
+		return
+	}
+
+	glog.V(0).Infof("processing %v", event)
+	if entry.Name == FilerConfName {
+		f.reloadFilerConfiguration(entry)
+	}
+}
+
+func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
+	var buf bytes.Buffer
+	err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
+	fc := NewFilerConf()
+	err := fc.loadFromChunks(f, entry.Content, entry.Chunks)
+	if err != nil {
+		glog.Errorf("read filer conf chunks: %v", err)
+		return
+	}
+	f.FilerConf = fc
+}
+
+func (f *Filer) LoadFilerConf() {
+	fc := NewFilerConf()
+	err := util.Retry("loadFilerConf", func() error {
+		return fc.loadFromFiler(f)
+	})
+	if err != nil {
+		glog.Errorf("read filer conf: %v", err)
+		return
+	}
+	f.FilerConf = fc
+}
diff --git a/weed/filer/filer_rename.go b/weed/filer/filer_rename.go
new file mode 100644
index 000000000..b6f0cf6de
--- /dev/null
+++ b/weed/filer/filer_rename.go
@@ -0,0 +1,30 @@
+package filer
+
+import (
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"strings"
+)
+
+func (f *Filer) CanRename(source, target util.FullPath) error {
+	sourceBucket := f.DetectBucket(source)
+	targetBucket := f.DetectBucket(target)
+	if sourceBucket != targetBucket {
+		return fmt.Errorf("can not move across collection %s => %s", sourceBucket, targetBucket)
+	}
+	return nil
+}
+
+func (f *Filer) DetectBucket(source util.FullPath) (bucket string) {
+	if strings.HasPrefix(string(source), f.DirBucketsPath+"/") {
+		bucketAndObjectKey := string(source)[len(f.DirBucketsPath)+1:]
+		t := strings.Index(bucketAndObjectKey, "/")
+		if t < 0 {
+			bucket = bucketAndObjectKey
+		}
+		if t > 0 {
+			bucket = bucketAndObjectKey[:t]
+		}
+	}
+	return bucket
+}
diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go
new file mode 100644
index 000000000..f43312cfa
--- /dev/null
+++ b/weed/filer/filer_search.go
@@ -0,0 +1,98 @@
+package filer
+
+import (
+	"context"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"path/filepath"
+	"strings"
+)
+
+func splitPattern(pattern string) (prefix string, restPattern string) {
+	position := strings.Index(pattern, "*")
+	if position >= 0 {
+		return pattern[:position], pattern[position:]
+	}
+	position = strings.Index(pattern, "?")
+	if position >= 0 {
+		return pattern[:position], pattern[position:]
+	}
+	return "", restPattern
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) {
+
+	_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool {
+		entries = append(entries, entry)
+		return true
+	})
+
+	hasMore = int64(len(entries)) >= limit+1
+	if hasMore {
+		entries = entries[:limit]
+	}
+
+	return entries, hasMore, err
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+		p = p[0 : len(p)-1]
+	}
+
+	prefixInNamePattern, restNamePattern := splitPattern(namePattern)
+	if prefixInNamePattern != "" {
+		prefix = prefixInNamePattern
+	}
+	var missedCount int64
+
+	missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
+
+	for missedCount > 0 && err == nil {
+		missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
+	}
+
+	return
+}
+
+func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
+
+	if len(restNamePattern) == 0 && len(namePatternExclude) == 0 {
+		lastFileName, err = f.doListValidEntries(ctx, p,
startFileName, inclusive, limit, prefix, eachEntryFunc) + return 0, lastFileName, err + } + + lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool { + nameToTest := entry.Name() + if len(namePatternExclude) > 0 { + if matched, matchErr := filepath.Match(namePatternExclude, nameToTest); matchErr == nil && matched { + missedCount++ + return true + } + } + if len(restNamePattern) > 0 { + if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && !matched { + missedCount++ + return true + } + } + if !eachEntryFunc(entry) { + return false + } + return true + }) + if err != nil { + return + } + return +} + +func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + var expiredCount int64 + expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc) + for expiredCount > 0 && err == nil { + expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix, eachEntryFunc) + } + return +} diff --git a/weed/filer/filerstore.go b/weed/filer/filerstore.go new file mode 100644 index 000000000..a5b2f25de --- /dev/null +++ b/weed/filer/filerstore.go @@ -0,0 +1,46 @@ +package filer + +import ( + "context" + "errors" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + ErrUnsupportedListDirectoryPrefixed = errors.New("unsupported directory prefix listing") + ErrUnsupportedSuperLargeDirectoryListing = errors.New("unsupported super large directory listing") + ErrKvNotImplemented = errors.New("kv not implemented yet") + ErrKvNotFound = errors.New("kv: not found") +) + +type ListEachEntryFunc func(entry *Entry) bool + +type FilerStore interface { + // GetName gets the name to locate the configuration in filer.toml file + GetName() string + // Initialize initializes the file store + Initialize(configuration util.Configuration, prefix string) error + InsertEntry(context.Context, *Entry) error + UpdateEntry(context.Context, *Entry) (err error) + // err == filer_pb.ErrNotFound if not found + FindEntry(context.Context, util.FullPath) (entry *Entry, err error) + DeleteEntry(context.Context, util.FullPath) (err error) + DeleteFolderChildren(context.Context, util.FullPath) (err error) + ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) + ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) + + BeginTransaction(ctx context.Context) (context.Context, error) + CommitTransaction(ctx context.Context) error + RollbackTransaction(ctx context.Context) error + + KvPut(ctx context.Context, key []byte, value []byte) (err error) + KvGet(ctx context.Context, key []byte) (value []byte, err error) + KvDelete(ctx context.Context, key []byte) (err error) + + Shutdown() +} + +type BucketAware interface { + OnBucketCreation(bucket string) + OnBucketDeletion(bucket string) +} diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go new file mode 100644 index 000000000..316c76a0c --- /dev/null +++ b/weed/filer/filerstore_hardlink.go @@ -0,0 +1,102 @@ +package 
filer + +import ( + "bytes" + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry *Entry) error { + if len(entry.HardLinkId) == 0 { + return nil + } + // handle hard links + if err := fsw.setHardLink(ctx, entry); err != nil { + return fmt.Errorf("setHardLink %d: %v", entry.HardLinkId, err) + } + + // check what is existing entry + glog.V(4).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath) + actualStore := fsw.getActualStore(entry.FullPath) + existingEntry, err := actualStore.FindEntry(ctx, entry.FullPath) + if err != nil && err != filer_pb.ErrNotFound { + return fmt.Errorf("update existing entry %s: %v", entry.FullPath, err) + } + + // remove old hard link + if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 { + glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) + if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err + } + } + return nil +} + +func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) error { + if len(entry.HardLinkId) == 0 { + return nil + } + key := entry.HardLinkId + + newBlob, encodeErr := entry.EncodeAttributesAndChunks() + if encodeErr != nil { + return encodeErr + } + + return fsw.KvPut(ctx, key, newBlob) +} + +func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entry) error { + if len(entry.HardLinkId) == 0 { + return nil + } + key := entry.HardLinkId + + glog.V(4).Infof("maybeReadHardLink KvGet %v", key) + value, err := fsw.KvGet(ctx, key) + if err != nil { + glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + return err + } + + if err = entry.DecodeAttributesAndChunks(value); err != nil { + glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + return err + } + + return nil +} + +func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error { + key := hardLinkId + value, err := fsw.KvGet(ctx, key) + if err == ErrKvNotFound { + return nil + } + if err != nil { + return err + } + + entry := &Entry{} + if err = entry.DecodeAttributesAndChunks(value); err != nil { + return err + } + + entry.HardLinkCounter-- + if entry.HardLinkCounter <= 0 { + glog.V(4).Infof("DeleteHardLink KvDelete %v", key) + return fsw.KvDelete(ctx, key) + } + + newBlob, encodeErr := entry.EncodeAttributesAndChunks() + if encodeErr != nil { + return encodeErr + } + + glog.V(4).Infof("DeleteHardLink KvPut %v", key) + return fsw.KvPut(ctx, key, newBlob) + +} diff --git a/weed/filer/filerstore_translate_path.go b/weed/filer/filerstore_translate_path.go new file mode 100644 index 000000000..00bf82ed4 --- /dev/null +++ b/weed/filer/filerstore_translate_path.go @@ -0,0 +1,153 @@ +package filer + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" +) + +var ( + _ = FilerStore(&FilerStorePathTranlator{}) +) + +type FilerStorePathTranlator struct { + actualStore FilerStore + storeRoot string +} + +func NewFilerStorePathTranlator(storeRoot string, store FilerStore) *FilerStorePathTranlator { + if innerStore, ok := store.(*FilerStorePathTranlator); ok { + return innerStore + } + + if !strings.HasSuffix(storeRoot, "/") { + storeRoot += "/" + } + + return &FilerStorePathTranlator{ + actualStore: store, + storeRoot: storeRoot, + } +} + +func (t *FilerStorePathTranlator) 
translatePath(fp util.FullPath) (newPath util.FullPath) { + newPath = fp + if t.storeRoot == "/" { + return + } + newPath = fp[len(t.storeRoot)-1:] + if newPath == "" { + newPath = "/" + } + return +} +func (t *FilerStorePathTranlator) changeEntryPath(entry *Entry) (previousPath util.FullPath) { + previousPath = entry.FullPath + if t.storeRoot == "/" { + return + } + entry.FullPath = t.translatePath(previousPath) + return +} +func (t *FilerStorePathTranlator) recoverEntryPath(entry *Entry, previousPath util.FullPath) { + entry.FullPath = previousPath +} + +func (t *FilerStorePathTranlator) GetName() string { + return t.actualStore.GetName() +} + +func (t *FilerStorePathTranlator) Initialize(configuration util.Configuration, prefix string) error { + return t.actualStore.Initialize(configuration, prefix) +} + +func (t *FilerStorePathTranlator) InsertEntry(ctx context.Context, entry *Entry) error { + previousPath := t.changeEntryPath(entry) + defer t.recoverEntryPath(entry, previousPath) + + return t.actualStore.InsertEntry(ctx, entry) +} + +func (t *FilerStorePathTranlator) UpdateEntry(ctx context.Context, entry *Entry) error { + previousPath := t.changeEntryPath(entry) + defer t.recoverEntryPath(entry, previousPath) + + return t.actualStore.UpdateEntry(ctx, entry) +} + +func (t *FilerStorePathTranlator) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { + if t.storeRoot == "/" { + return t.actualStore.FindEntry(ctx, fp) + } + newFullPath := t.translatePath(fp) + entry, err = t.actualStore.FindEntry(ctx, newFullPath) + if err == nil { + entry.FullPath = fp[:len(t.storeRoot)-1] + entry.FullPath + } + return +} + +func (t *FilerStorePathTranlator) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + newFullPath := t.translatePath(fp) + return t.actualStore.DeleteEntry(ctx, newFullPath) +} + +func (t *FilerStorePathTranlator) DeleteOneEntry(ctx context.Context, existingEntry *Entry) (err error) { + + previousPath := t.changeEntryPath(existingEntry) + defer t.recoverEntryPath(existingEntry, previousPath) + + return t.actualStore.DeleteEntry(ctx, existingEntry.FullPath) +} + +func (t *FilerStorePathTranlator) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { + newFullPath := t.translatePath(fp) + + return t.actualStore.DeleteFolderChildren(ctx, newFullPath) +} + +func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) { + + newFullPath := t.translatePath(dirPath) + + return t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { + entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath + return eachEntryFunc(entry) + }) +} + +func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (string, error) { + + newFullPath := t.translatePath(dirPath) + + return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool { + entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath + return eachEntryFunc(entry) + }) +} + +func (t *FilerStorePathTranlator) BeginTransaction(ctx context.Context) (context.Context, error) { + return t.actualStore.BeginTransaction(ctx) +} + +func (t 
*FilerStorePathTranlator) CommitTransaction(ctx context.Context) error { + return t.actualStore.CommitTransaction(ctx) +} + +func (t *FilerStorePathTranlator) RollbackTransaction(ctx context.Context) error { + return t.actualStore.RollbackTransaction(ctx) +} + +func (t *FilerStorePathTranlator) Shutdown() { + t.actualStore.Shutdown() +} + +func (t *FilerStorePathTranlator) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + return t.actualStore.KvPut(ctx, key, value) +} +func (t *FilerStorePathTranlator) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + return t.actualStore.KvGet(ctx, key) +} +func (t *FilerStorePathTranlator) KvDelete(ctx context.Context, key []byte) (err error) { + return t.actualStore.KvDelete(ctx, key) +} diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go new file mode 100644 index 000000000..cd7c0bea3 --- /dev/null +++ b/weed/filer/filerstore_wrapper.go @@ -0,0 +1,322 @@ +package filer + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/viant/ptrie" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + _ = VirtualFilerStore(&FilerStoreWrapper{}) +) + +type VirtualFilerStore interface { + FilerStore + DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error + DeleteOneEntry(ctx context.Context, entry *Entry) error + AddPathSpecificStore(path string, storeId string, store FilerStore) + OnBucketCreation(bucket string) + OnBucketDeletion(bucket string) +} + +type FilerStoreWrapper struct { + defaultStore FilerStore + pathToStore ptrie.Trie + storeIdToStore map[string]FilerStore +} + +func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { + if innerStore, ok := store.(*FilerStoreWrapper); ok { + return innerStore + } + return &FilerStoreWrapper{ + defaultStore: store, + pathToStore: ptrie.New(), + storeIdToStore: make(map[string]FilerStore), + } +} + +func (fsw *FilerStoreWrapper) OnBucketCreation(bucket string) { + for _, store := range fsw.storeIdToStore { + if ba, ok := store.(BucketAware); ok { + ba.OnBucketCreation(bucket) + } + } + if ba, ok := fsw.defaultStore.(BucketAware); ok { + ba.OnBucketCreation(bucket) + } +} +func (fsw *FilerStoreWrapper) OnBucketDeletion(bucket string) { + for _, store := range fsw.storeIdToStore { + if ba, ok := store.(BucketAware); ok { + ba.OnBucketDeletion(bucket) + } + } + if ba, ok := fsw.defaultStore.(BucketAware); ok { + ba.OnBucketDeletion(bucket) + } +} + +func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, store FilerStore) { + fsw.storeIdToStore[storeId] = NewFilerStorePathTranlator(path, store) + err := fsw.pathToStore.Put([]byte(path), storeId) + if err != nil { + glog.Fatalf("put path specific store: %v", err) + } +} + +func (fsw *FilerStoreWrapper) getActualStore(path util.FullPath) (store FilerStore) { + store = fsw.defaultStore + if path == "/" { + return + } + var storeId string + fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool { + storeId = value.(string) + return false + }) + if storeId != "" { + store = fsw.storeIdToStore[storeId] + } + return +} + +func (fsw *FilerStoreWrapper) getDefaultStore() (store FilerStore) { + return fsw.defaultStore +} + +func (fsw *FilerStoreWrapper) GetName() string { + return fsw.getDefaultStore().GetName() +} + +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, 
prefix string) error { + return fsw.getDefaultStore().Initialize(configuration, prefix) +} + +func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { + actualStore := fsw.getActualStore(entry.FullPath) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "insert").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + if entry.Mime == "application/octet-stream" { + entry.Mime = "" + } + + if err := fsw.handleUpdateToHardLinks(ctx, entry); err != nil { + return err + } + + glog.V(4).Infof("InsertEntry %s", entry.FullPath) + return actualStore.InsertEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error { + actualStore := fsw.getActualStore(entry.FullPath) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "update").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + if entry.Mime == "application/octet-stream" { + entry.Mime = "" + } + + if err := fsw.handleUpdateToHardLinks(ctx, entry); err != nil { + return err + } + + glog.V(4).Infof("UpdateEntry %s", entry.FullPath) + return actualStore.UpdateEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { + actualStore := fsw.getActualStore(fp) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "find").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) + }() + + entry, err = actualStore.FindEntry(ctx, fp) + glog.V(4).Infof("FindEntry %s: %v", fp, err) + if err != nil { + return nil, err + } + + fsw.maybeReadHardLink(ctx, entry) + + filer_pb.AfterEntryDeserialization(entry.Chunks) + return +} + +func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + actualStore := fsw.getActualStore(fp) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) + }() + + existingEntry, findErr := fsw.FindEntry(ctx, fp) + if findErr == filer_pb.ErrNotFound { + return nil + } + if len(existingEntry.HardLinkId) != 0 { + // remove hard link + glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err + } + } + + glog.V(4).Infof("DeleteEntry %s", fp) + return actualStore.DeleteEntry(ctx, fp) +} + +func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry *Entry) (err error) { + actualStore := fsw.getActualStore(existingEntry.FullPath) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) + }() + + if len(existingEntry.HardLinkId) != 0 { + // remove hard link + glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err + } + } + + 
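	// unlike DeleteEntry above, the caller already holds the entry here, so
	// the extra FindEntry lookup is skipped before removing it from the store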
glog.V(4).Infof("DeleteOneEntry %s", existingEntry.FullPath) + return actualStore.DeleteEntry(ctx, existingEntry.FullPath) +} + +func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { + actualStore := fsw.getActualStore(fp + "/") + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) + }() + + glog.V(4).Infof("DeleteFolderChildren %s", fp) + return actualStore.DeleteFolderChildren(ctx, fp) +} + +func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) { + actualStore := fsw.getActualStore(dirPath + "/") + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "list").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) + }() + + glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) + return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { + fsw.maybeReadHardLink(ctx, entry) + filer_pb.AfterEntryDeserialization(entry.Chunks) + return eachEntryFunc(entry) + }) +} + +func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + actualStore := fsw.getActualStore(dirPath + "/") + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "prefixList").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds()) + }() + glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) + lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc) + if err == ErrUnsupportedListDirectoryPrefixed { + lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool { + fsw.maybeReadHardLink(ctx, entry) + filer_pb.AfterEntryDeserialization(entry.Chunks) + return eachEntryFunc(entry) + }) + } + return lastFileName, err +} + +func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + actualStore := fsw.getActualStore(dirPath + "/") + + if prefix == "" { + return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc) + } + + var notPrefixed []*Entry + lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { + notPrefixed = append(notPrefixed, entry) + return true + }) + if err != nil { + return + } + + count := int64(0) + for count < limit && len(notPrefixed) > 0 { + for _, entry := range notPrefixed { + if strings.HasPrefix(entry.Name(), prefix) { + count++ + if !eachEntryFunc(entry) { + return + } + if count >= limit { + break + } 
+ } + } + if count < limit { + notPrefixed = notPrefixed[:0] + _, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit, func(entry *Entry) bool { + notPrefixed = append(notPrefixed, entry) + return true + }) + if err != nil { + return + } + } + } + return +} + +func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { + return fsw.getDefaultStore().BeginTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { + return fsw.getDefaultStore().CommitTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { + return fsw.getDefaultStore().RollbackTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) Shutdown() { + fsw.getDefaultStore().Shutdown() +} + +func (fsw *FilerStoreWrapper) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + return fsw.getDefaultStore().KvPut(ctx, key, value) +} +func (fsw *FilerStoreWrapper) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + return fsw.getDefaultStore().KvGet(ctx, key) +} +func (fsw *FilerStoreWrapper) KvDelete(ctx context.Context, key []byte) (err error) { + return fsw.getDefaultStore().KvDelete(ctx, key) +} diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go new file mode 100644 index 000000000..e0d878ca7 --- /dev/null +++ b/weed/filer/hbase/hbase_store.go @@ -0,0 +1,231 @@ +package hbase + +import ( + "bytes" + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/tsuna/gohbase" + "github.com/tsuna/gohbase/hrpc" + "io" +) + +func init() { + filer.Stores = append(filer.Stores, &HbaseStore{}) +} + +type HbaseStore struct { + Client gohbase.Client + table []byte + cfKv string + cfMetaDir string + column string +} + +func (store *HbaseStore) GetName() string { + return "hbase" +} + +func (store *HbaseStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"zkquorum"), + configuration.GetString(prefix+"table"), + ) +} + +func (store *HbaseStore) initialize(zkquorum, table string) (err error) { + store.Client = gohbase.NewClient(zkquorum) + store.table = []byte(table) + store.cfKv = "kv" + store.cfMetaDir = "meta" + store.column = "a" + + // check table exists + key := "whatever" + headers := map[string][]string{store.cfMetaDir: nil} + get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers)) + if err != nil { + return fmt.Errorf("NewGet returned an error: %v", err) + } + _, err = store.Client.Get(get) + if err != gohbase.TableNotFound { + return nil + } + + // create table + adminClient := gohbase.NewAdminClient(zkquorum) + cFamilies := []string{store.cfKv, store.cfMetaDir} + cf := make(map[string]map[string]string, len(cFamilies)) + for _, f := range cFamilies { + cf[f] = nil + } + ct := hrpc.NewCreateTable(context.Background(), []byte(table), cf) + if err := adminClient.CreateTable(ct); err != nil { + return err + } + + return nil +} + +func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) error { + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + if len(entry.Chunks) > 50 { + value = util.MaybeGzipData(value) + } + + return store.doPut(ctx, 
store.cfMetaDir, []byte(entry.FullPath), value, entry.TtlSec) +} + +func (store *HbaseStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + return store.InsertEntry(ctx, entry) +} + +func (store *HbaseStore) FindEntry(ctx context.Context, path util.FullPath) (entry *filer.Entry, err error) { + value, err := store.doGet(ctx, store.cfMetaDir, []byte(path)) + if err != nil { + if err == filer.ErrKvNotFound { + return nil, filer_pb.ErrNotFound + } + return nil, err + } + + entry = &filer.Entry{ + FullPath: path, + } + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + return entry, nil +} + +func (store *HbaseStore) DeleteEntry(ctx context.Context, path util.FullPath) (err error) { + return store.doDelete(ctx, store.cfMetaDir, []byte(path)) +} + +func (store *HbaseStore) DeleteFolderChildren(ctx context.Context, path util.FullPath) (err error) { + + family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}} + expectedPrefix := []byte(path.Child("")) + scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family)) + if err != nil { + return err + } + + scanner := store.Client.Scan(scan) + defer scanner.Close() + for { + res, err := scanner.Next() + if err != nil { + break + } + if len(res.Cells) == 0 { + continue + } + cell := res.Cells[0] + + if !bytes.HasPrefix(cell.Row, expectedPrefix) { + break + } + fullpath := util.FullPath(cell.Row) + dir, _ := fullpath.DirAndName() + if dir != string(path) { + continue + } + + err = store.doDelete(ctx, store.cfMetaDir, cell.Row) + if err != nil { + break + } + + } + return +} + +func (store *HbaseStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (string, error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}} + expectedPrefix := []byte(dirPath.Child(prefix)) + scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family)) + if err != nil { + return lastFileName, err + } + + scanner := store.Client.Scan(scan) + defer scanner.Close() + for { + res, err := scanner.Next() + if err == io.EOF { + break + } + if err != nil { + return lastFileName, err + } + if len(res.Cells) == 0 { + continue + } + cell := res.Cells[0] + + if !bytes.HasPrefix(cell.Row, expectedPrefix) { + break + } + + fullpath := util.FullPath(cell.Row) + dir, fileName := fullpath.DirAndName() + if dir != string(dirPath) { + continue + } + + value := cell.Value + + if fileName == startFileName && !includeStartFile { + continue + } + + limit-- + if limit < 0 { + break + } + + lastFileName = fileName + + entry := &filer.Entry{ + FullPath: fullpath, + } + if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + if !eachEntryFunc(entry) { + break + } + } + + return lastFileName, nil +} + +func (store *HbaseStore) BeginTransaction(ctx context.Context) (context.Context, error) { + 
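	// HBase has no cross-row transactions, so this and the two hooks below
	// are no-ops. For reference, a plausible filer.toml stanza for this store
	// (keys taken from Initialize above; section name and values are assumed,
	// not from this change):
	//
	//	[hbase]
	//	enabled = true
	//	zkquorum = "localhost:2181"
	//	table = "seaweedfs"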
return ctx, nil +} + +func (store *HbaseStore) CommitTransaction(ctx context.Context) error { + return nil +} + +func (store *HbaseStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *HbaseStore) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer/hbase/hbase_store_kv.go b/weed/filer/hbase/hbase_store_kv.go new file mode 100644 index 000000000..990e55a24 --- /dev/null +++ b/weed/filer/hbase/hbase_store_kv.go @@ -0,0 +1,76 @@ +package hbase + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/tsuna/gohbase/hrpc" + "time" +) + +const ( + COLUMN_NAME = "a" +) + +func (store *HbaseStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + return store.doPut(ctx, store.cfKv, key, value, 0) +} + +func (store *HbaseStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + return store.doGet(ctx, store.cfKv, key) +} + +func (store *HbaseStore) KvDelete(ctx context.Context, key []byte) (err error) { + return store.doDelete(ctx, store.cfKv, key) +} + +func (store *HbaseStore) doPut(ctx context.Context, cf string, key, value []byte, ttlSecond int32) (err error) { + if ttlSecond > 0 { + return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal), hrpc.TTL(time.Duration(ttlSecond)*time.Second)) + } + return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal)) +} + +func (store *HbaseStore) doPutWithOptions(ctx context.Context, cf string, key, value []byte, options ...func(hrpc.Call) error) (err error) { + values := map[string]map[string][]byte{cf: map[string][]byte{}} + values[cf][COLUMN_NAME] = value + putRequest, err := hrpc.NewPut(ctx, store.table, key, values, options...) + if err != nil { + return err + } + _, err = store.Client.Put(putRequest) + if err != nil { + return err + } + return nil +} + +func (store *HbaseStore) doGet(ctx context.Context, cf string, key []byte) (value []byte, err error) { + family := map[string][]string{cf: {COLUMN_NAME}} + getRequest, err := hrpc.NewGet(context.Background(), store.table, key, hrpc.Families(family)) + if err != nil { + return nil, err + } + getResp, err := store.Client.Get(getRequest) + if err != nil { + return nil, err + } + if len(getResp.Cells) == 0 { + return nil, filer.ErrKvNotFound + } + + return getResp.Cells[0].Value, nil +} + +func (store *HbaseStore) doDelete(ctx context.Context, cf string, key []byte) (err error) { + values := map[string]map[string][]byte{cf: map[string][]byte{}} + values[cf][COLUMN_NAME] = nil + deleteRequest, err := hrpc.NewDel(ctx, store.table, key, values, hrpc.Durability(hrpc.AsyncWal)) + if err != nil { + return err + } + _, err = store.Client.Delete(deleteRequest) + if err != nil { + return err + } + return nil +} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go index 4952b3b3a..ce454f36a 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer/leveldb/leveldb_store.go @@ -4,13 +4,15 @@ import ( "bytes" "context" "fmt" - "github.com/syndtr/goleveldb/leveldb" + leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "os" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" ) @@ -19,7 +21,7 @@ const ( ) func init() { - filer2.Stores 
= append(filer2.Stores, &LevelDBStore{}) + filer.Stores = append(filer.Stores, &LevelDBStore{}) } type LevelDBStore struct { @@ -30,13 +32,14 @@ func (store *LevelDBStore) GetName() string { return "leveldb" } -func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir) } func (store *LevelDBStore) initialize(dir string) (err error) { glog.Infof("filer store dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } @@ -48,8 +51,13 @@ func (store *LevelDBStore) initialize(dir string) (err error) { } if store.db, err = leveldb.OpenFile(dir, opts); err != nil { - glog.Infof("filer store open dir %s: %v", dir, err) - return + if leveldb_errors.IsCorrupted(err) { + store.db, err = leveldb.RecoverFile(dir, opts) + } + if err != nil { + glog.Infof("filer store open dir %s: %v", dir, err) + return + } } return } @@ -64,7 +72,7 @@ func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error { return nil } -func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { key := genKey(entry.DirAndName()) value, err := entry.EncodeAttributesAndChunks() @@ -72,6 +80,10 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } + if len(entry.Chunks) > 50 { + value = weed_util.MaybeGzipData(value) + } + err = store.db.Put(key, value, nil) if err != nil { @@ -83,27 +95,27 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) return nil } -func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { key := genKey(fullpath.DirAndName()) data, err := store.db.Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + return nil, fmt.Errorf("get %s : %v", fullpath, err) } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks(data) + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData((data))) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -113,7 +125,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa return entry, nil } -func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) err = store.db.Delete(key, nil) @@ -124,7 +136,7 @@ func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full return nil } -func 
(store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { batch := new(leveldb.Batch) @@ -152,12 +164,19 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath fi return nil } -func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") +func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil) + directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) + } + + iter := store.db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) for iter.Next() { key := iter.Key() if !bytes.HasPrefix(key, directoryPrefix) { @@ -167,26 +186,29 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath fi if fileName == "" { continue } - if fileName == startFileName && !inclusive { + if fileName == startFileName && !includeStartFile { continue } limit-- if limit < 0 { break } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + lastFileName = fileName + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), } - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } - entries = append(entries, entry) + if !eachEntryFunc(entry) { + break + } } iter.Release() - return entries, err + return lastFileName, err } func genKey(dirPath, fileName string) (key []byte) { @@ -196,7 +218,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { @@ -215,3 +237,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *LevelDBStore) Shutdown() { + store.db.Close() +} diff --git a/weed/filer/leveldb/leveldb_store_kv.go b/weed/filer/leveldb/leveldb_store_kv.go new file mode 100644 index 000000000..f686cbf21 --- /dev/null +++ b/weed/filer/leveldb/leveldb_store_kv.go @@ -0,0 +1,45 @@ +package leveldb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/syndtr/goleveldb/leveldb" +) + +func (store 
*LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + err = store.db.Put(key, value, nil) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + value, err = store.db.Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error) { + + err = store.db.Delete(key, nil) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go new file mode 100644 index 000000000..d437895f5 --- /dev/null +++ b/weed/filer/leveldb/leveldb_store_test.go @@ -0,0 +1,115 @@ +package leveldb + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestCreateAndFind(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + defer os.RemoveAll(dir) + store := &LevelDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() + + entry1 := &filer.Entry{ + FullPath: fullpath, + Attr: filer.Attr{ + Mode: 0440, + Uid: 1234, + Gid: 5678, + }, + } + + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { + t.Errorf("create entry %v: %v", entry1.FullPath, err) + return + } + + entry, err := testFiler.FindEntry(ctx, fullpath) + + if err != nil { + t.Errorf("find entry: %v", err) + return + } + + if entry.FullPath != entry1.FullPath { + t.Errorf("find wrong entry: %v", entry.FullPath) + return + } + + // checking one upper directory + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + + // checking one upper directory + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func TestEmptyRoot(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + defer os.RemoveAll(dir) + store := &LevelDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + // checking one upper directory + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if err != nil { + t.Errorf("list entries: %v", err) + return + } + if len(entries) != 0 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func BenchmarkInsertEntry(b *testing.B) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") + defer os.RemoveAll(dir) + store := &LevelDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + entry := &filer.Entry{ + FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)), + Attr: filer.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), 
+ Mode: os.FileMode(0644), + }, + } + store.InsertEntry(ctx, entry) + } +} diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go index 8a16822ab..124d61c1c 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer/leveldb2/leveldb2_store.go @@ -5,20 +5,21 @@ import ( "context" "crypto/md5" "fmt" - "io" - "os" - "github.com/syndtr/goleveldb/leveldb" + leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "io" + "os" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" ) func init() { - filer2.Stores = append(filer2.Stores, &LevelDB2Store{}) + filer.Stores = append(filer.Stores, &LevelDB2Store{}) } type LevelDB2Store struct { @@ -30,13 +31,14 @@ func (store *LevelDB2Store) GetName() string { return "leveldb2" } -func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir, 8) } func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { glog.Infof("filer store leveldb2 dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } @@ -51,9 +53,12 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { dbFolder := fmt.Sprintf("%s/%02d", dir, d) os.MkdirAll(dbFolder, 0755) db, dbErr := leveldb.OpenFile(dbFolder, opts) + if leveldb_errors.IsCorrupted(dbErr) { + db, dbErr = leveldb.RecoverFile(dbFolder, opts) + } if dbErr != nil { glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) - return + return dbErr } store.dbs = append(store.dbs, db) } @@ -72,7 +77,7 @@ func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error { return nil } -func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { dir, name := entry.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) @@ -81,6 +86,10 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } + if len(entry.Chunks) > 50 { + value = weed_util.MaybeGzipData(value) + } + err = store.dbs[partitionId].Put(key, value, nil) if err != nil { @@ -92,28 +101,28 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry return nil } -func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { dir, name := fullpath.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) data, err := store.dbs[partitionId].Get(key, nil) if err 
== leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + return nil, fmt.Errorf("get %s : %v", fullpath, err) } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks(data) + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data)) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -123,7 +132,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP return entry, nil } -func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { dir, name := fullpath.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) @@ -135,7 +144,7 @@ func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful return nil } -func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) batch := new(leveldb.Batch) @@ -163,11 +172,17 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath f return nil } -func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) - lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount) + directoryPrefix, partitionId := genDirectoryKeyPrefix(dirPath, prefix, store.dbCount) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart, _ = genDirectoryKeyPrefix(dirPath, startFileName, store.dbCount) + } iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) for iter.Next() { @@ -179,29 +194,31 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath f if fileName == "" { continue } - if fileName == startFileName && !inclusive { + if fileName == startFileName && !includeStartFile { continue } limit-- if limit < 0 { break } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + lastFileName = fileName + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), } // println("list", entry.FullPath, "chunks", len(entry.Chunks)) - - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr 
glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } - entries = append(entries, entry) + if !eachEntryFunc(entry) { + break + } } iter.Release() - return entries, err + return lastFileName, err } func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) { @@ -210,7 +227,7 @@ func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) return key, partitionId } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount) if len(startFileName) > 0 { keyPrefix = append(keyPrefix, []byte(startFileName)...) @@ -235,3 +252,9 @@ func hashToBytes(dir string, dbCount int) ([]byte, int) { return b, int(x) % dbCount } + +func (store *LevelDB2Store) Shutdown() { + for d := 0; d < store.dbCount; d++ { + store.dbs[d].Close() + } +} diff --git a/weed/filer/leveldb2/leveldb2_store_kv.go b/weed/filer/leveldb2/leveldb2_store_kv.go new file mode 100644 index 000000000..b415d3c32 --- /dev/null +++ b/weed/filer/leveldb2/leveldb2_store_kv.go @@ -0,0 +1,56 @@ +package leveldb + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/syndtr/goleveldb/leveldb" +) + +func (store *LevelDB2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + partitionId := bucketKvKey(key, store.dbCount) + + err = store.dbs[partitionId].Put(key, value, nil) + + if err != nil { + return fmt.Errorf("kv bucket %d put: %v", partitionId, err) + } + + return nil +} + +func (store *LevelDB2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + partitionId := bucketKvKey(key, store.dbCount) + + value, err = store.dbs[partitionId].Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv bucket %d get: %v", partitionId, err) + } + + return +} + +func (store *LevelDB2Store) KvDelete(ctx context.Context, key []byte) (err error) { + + partitionId := bucketKvKey(key, store.dbCount) + + err = store.dbs[partitionId].Delete(key, nil) + + if err != nil { + return fmt.Errorf("kv bucket %d delete: %v", partitionId, err) + } + + return nil +} + +func bucketKvKey(key []byte, dbCount int) (partitionId int) { + return int(key[len(key)-1]) % dbCount +} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go index e28ef7dac..fd0ad18a3 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer/leveldb2/leveldb2_store_test.go @@ -2,40 +2,41 @@ package leveldb import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} store.initialize(dir, 2) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") ctx := context.Background() - entry1 := &filer2.Entry{ + entry1 := &filer.Entry{ FullPath: fullpath, - Attr: 
filer2.Attr{ + Attr: filer.Attr{ Mode: 0440, Uid: 1234, Gid: 5678, }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(ctx, fullpath) + entry, err := testFiler.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -48,14 +49,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -64,18 +65,17 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} store.initialize(dir, 2) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go new file mode 100644 index 000000000..d1cdfbbf6 --- /dev/null +++ b/weed/filer/leveldb3/leveldb3_store.go @@ -0,0 +1,376 @@ +package leveldb + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "github.com/syndtr/goleveldb/leveldb" + leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "io" + "os" + "strings" + "sync" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DEFAULT = "_main" +) + +func init() { + filer.Stores = append(filer.Stores, &LevelDB3Store{}) +} + +type LevelDB3Store struct { + dir string + dbs map[string]*leveldb.DB + dbsLock sync.RWMutex +} + +func (store *LevelDB3Store) GetName() string { + return "leveldb3" +} + +func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") + return store.initialize(dir) +} + +func (store *LevelDB3Store) initialize(dir string) (err error) { + glog.Infof("filer store leveldb3 dir: %s", dir) + os.MkdirAll(dir, 0755) + if err := weed_util.TestFolderWritable(dir); err != nil { + return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) + } + store.dir = dir + + db, loadDbErr := store.loadDB(DEFAULT) + if loadDbErr != nil { + return loadDbErr + } + store.dbs = make(map[string]*leveldb.DB) + store.dbs[DEFAULT] = db + + return +} + +func (store *LevelDB3Store) 
loadDB(name string) (*leveldb.DB, error) { + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + if name != DEFAULT { + opts = &opt.Options{ + BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + } + + dbFolder := fmt.Sprintf("%s/%s", store.dir, name) + os.MkdirAll(dbFolder, 0755) + db, dbErr := leveldb.OpenFile(dbFolder, opts) + if leveldb_errors.IsCorrupted(dbErr) { + db, dbErr = leveldb.RecoverFile(dbFolder, opts) + } + if dbErr != nil { + glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) + return nil, dbErr + } + return db, nil +} + +func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bool) (*leveldb.DB, string, weed_util.FullPath, error) { + + store.dbsLock.RLock() + + defaultDB := store.dbs[DEFAULT] + if !strings.HasPrefix(string(fullpath), "/buckets/") { + store.dbsLock.RUnlock() + return defaultDB, DEFAULT, fullpath, nil + } + + // detect bucket + bucketAndObjectKey := string(fullpath)[len("/buckets/"):] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 && !isForChildren { + store.dbsLock.RUnlock() + return defaultDB, DEFAULT, fullpath, nil + } + bucket := bucketAndObjectKey + shortPath := weed_util.FullPath("/") + if t > 0 { + bucket = bucketAndObjectKey[:t] + shortPath = weed_util.FullPath(bucketAndObjectKey[t:]) + } + + if db, found := store.dbs[bucket]; found { + store.dbsLock.RUnlock() + return db, bucket, shortPath, nil + } + + store.dbsLock.RUnlock() + // upgrade to write lock + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + // double check after getting the write lock + if db, found := store.dbs[bucket]; found { + return db, bucket, shortPath, nil + } + + // create db + db, err := store.loadDB(bucket) + if err != nil { + return nil, bucket, shortPath, err + } + store.dbs[bucket] = db + + return db, bucket, shortPath, nil +} + +func (store *LevelDB3Store) closeDB(bucket string) { + + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + if db, found := store.dbs[bucket]; found { + db.Close() + delete(store.dbs, bucket) + } + +} + +func (store *LevelDB3Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *LevelDB3Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *LevelDB3Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + db, _, shortPath, err := store.findDB(entry.FullPath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", entry.FullPath, err) + } + + dir, name := shortPath.DirAndName() + key := genKey(dir, name) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > 50 { + value = weed_util.MaybeGzipData(value) + } + + err = db.Put(key, value, nil) + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) + + return nil +} + +func (store *LevelDB3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath 
weed_util.FullPath) (entry *filer.Entry, err error) { + + db, _, shortPath, err := store.findDB(fullpath, false) + if err != nil { + return nil, fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + key := genKey(dir, name) + + data, err := db.Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer_pb.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) + + return entry, nil +} + +func (store *LevelDB3Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + + db, _, shortPath, err := store.findDB(fullpath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + key := genKey(dir, name) + + err = db.Delete(key, nil) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB3Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + + db, bucket, shortPath, err := store.findDB(fullpath, true) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + if bucket != DEFAULT && shortPath == "/" { + store.closeDB(bucket) + if bucket != "" { // just to make sure + os.RemoveAll(store.dir + "/" + bucket) + } + return nil + } + + directoryPrefix := genDirectoryKeyPrefix(shortPath, "") + + batch := new(leveldb.Batch) + + iter := db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete(append(directoryPrefix, []byte(fileName)...)) + } + iter.Release() + + err = db.Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB3Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + db, _, shortPath, err := store.findDB(dirPath, true) + if err != nil { + return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err) + } + + directoryPrefix := genDirectoryKeyPrefix(shortPath, prefix) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(shortPath, startFileName) + } + + iter := db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + if fileName == startFileName && !includeStartFile { + continue + } + limit-- + if limit < 0 { + break + } + lastFileName = fileName + entry := &filer.Entry{ + 
FullPath: weed_util.NewFullPath(string(dirPath), fileName), + } + + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + if !eachEntryFunc(entry) { + break + } + } + iter.Release() + + return lastFileName, err +} + +func genKey(dirPath, fileName string) (key []byte) { + key = hashToBytes(dirPath) + key = append(key, []byte(fileName)...) + return key +} + +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { + keyPrefix = hashToBytes(string(fullpath)) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) + } + return keyPrefix +} + +func getNameFromKey(key []byte) string { + + return string(key[md5.Size:]) + +} + +// hash directory +func hashToBytes(dir string) []byte { + h := md5.New() + io.WriteString(h, dir) + b := h.Sum(nil) + return b +} + +func (store *LevelDB3Store) Shutdown() { + for _, db := range store.dbs { + db.Close() + } +} diff --git a/weed/filer/leveldb3/leveldb3_store_kv.go b/weed/filer/leveldb3/leveldb3_store_kv.go new file mode 100644 index 000000000..18d782b80 --- /dev/null +++ b/weed/filer/leveldb3/leveldb3_store_kv.go @@ -0,0 +1,46 @@ +package leveldb + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/syndtr/goleveldb/leveldb" +) + +func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + err = store.dbs[DEFAULT].Put(key, value, nil) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + value, err = store.dbs[DEFAULT].Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error) { + + err = store.dbs[DEFAULT].Delete(key, nil) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go index 904de8c97..0b970a539 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer/leveldb3/leveldb3_store_test.go @@ -2,40 +2,41 @@ package leveldb import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) - store := &LevelDBStore{} + store := &LevelDB3Store{} store.initialize(dir) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") ctx := context.Background() - entry1 := &filer2.Entry{ + entry1 := &filer.Entry{ FullPath: fullpath, - Attr: filer2.Attr{ + Attr: filer.Attr{ Mode: 0440, Uid: 1234, Gid: 5678, }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { t.Errorf("create 
entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(ctx, fullpath) + entry, err := testFiler.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -48,14 +49,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -64,18 +65,17 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) - store := &LevelDBStore{} + store := &LevelDB3Store{} store.initialize(dir) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go new file mode 100644 index 000000000..5c368a57e --- /dev/null +++ b/weed/filer/meta_aggregator.go @@ -0,0 +1,213 @@ +package filer + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type MetaAggregator struct { + filers []string + grpcDialOption grpc.DialOption + MetaLogBuffer *log_buffer.LogBuffer + // notifying clients + ListenersLock sync.Mutex + ListenersCond *sync.Cond +} + +// MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk. +// The old data comes from what each LocalMetadata persisted on disk. +func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator { + t := &MetaAggregator{ + filers: filers, + grpcDialOption: grpcDialOption, + } + t.ListenersCond = sync.NewCond(&t.ListenersLock) + t.MetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, nil, func() { + t.ListenersCond.Broadcast() + }) + return t +} + +func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self string) { + for _, filer := range ma.filers { + go ma.subscribeToOneFiler(f, self, filer) + } +} + +func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string) { + + /* + Each filer reads the "filer.store.id", which is the store's signature when filer starts. + + When reading from other filers' local meta changes: + * if the received change does not contain signature from self, apply the change to current filer store. + + Upon connecting to other filers, need to remember their signature and their offsets. 
+
+	*/
+
+	var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse)
+	lastPersistTime := time.Now()
+	lastTsNs := time.Now().Add(-LogFlushInterval).UnixNano()
+
+	peerSignature, err := ma.readFilerStoreSignature(peer)
+	for err != nil {
+		glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
+		time.Sleep(1357 * time.Millisecond)
+		peerSignature, err = ma.readFilerStoreSignature(peer)
+	}
+
+	// when filer store is not shared by multiple filers
+	if peerSignature != f.Signature {
+		if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil {
+			lastTsNs = prevTsNs
+		}
+
+		glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
+		var counter int64
+		var synced bool
+		maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
+			if err := Replay(f.Store, event); err != nil {
+				glog.Errorf("failed to replay metadata change from %v: %v", peer, err)
+				return
+			}
+			counter++
+			if lastPersistTime.Add(time.Minute).Before(time.Now()) {
+				if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
+					if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
+						glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
+					} else if !synced {
+						synced = true
+						glog.V(0).Infof("synced with %s", peer)
+					}
+					lastPersistTime = time.Now()
+					counter = 0
+				} else {
+					glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
+				}
+			}
+		}
+	}
+
+	processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
+		data, err := proto.Marshal(event)
+		if err != nil {
+			glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+			return err
+		}
+		dir := event.Directory
+		// println("received meta change", dir, "size", len(data))
+		ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, 0)
+		if maybeReplicateMetadataChange != nil {
+			maybeReplicateMetadataChange(event)
+		}
+		return nil
+	}
+
+	for {
+		err := pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+			stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
+				ClientName: "filer:" + self,
+				PathPrefix: "/",
+				SinceNs:    lastTsNs,
+			})
+			if err != nil {
+				return fmt.Errorf("subscribe: %v", err)
+			}
+
+			for {
+				resp, listenErr := stream.Recv()
+				if listenErr == io.EOF {
+					return nil
+				}
+				if listenErr != nil {
+					return listenErr
+				}
+
+				if err := processEventFn(resp); err != nil {
+					return fmt.Errorf("process %v: %v", resp, err)
+				}
+				lastTsNs = resp.TsNs
+
+				f.onMetadataChangeEvent(resp)
+
+			}
+		})
+		if err != nil {
+			glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+			time.Sleep(1733 * time.Millisecond)
+		}
+	}
+}
+
+func (ma *MetaAggregator) readFilerStoreSignature(peer string) (sig int32, err error) {
+	err = pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+		if err != nil {
+			return err
+		}
+		sig = resp.Signature
+		return nil
+	})
+	return
+}
+
+const (
+	MetaOffsetPrefix = "Meta"
+)
+
+func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32) (lastTsNs int64, err error) {
+
+	key := []byte(MetaOffsetPrefix + "xxxx")
+	util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
+
+	value, err :=
f.Store.KvGet(context.Background(), key) + + if err == ErrKvNotFound { + glog.Warningf("readOffset %s not found", peer) + return 0, nil + } + + if err != nil { + return 0, fmt.Errorf("readOffset %s : %v", peer, err) + } + + lastTsNs = int64(util.BytesToUint64(value)) + + glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs) + + return +} + +func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int32, lastTsNs int64) (err error) { + + key := []byte(MetaOffsetPrefix + "xxxx") + util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) + + value := make([]byte, 8) + util.Uint64toBytes(value, uint64(lastTsNs)) + + err = f.Store.KvPut(context.Background(), key, value) + + if err != nil { + return fmt.Errorf("updateOffset %s : %v", peer, err) + } + + glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs) + + return +} diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go new file mode 100644 index 000000000..feb76278b --- /dev/null +++ b/weed/filer/meta_replay.go @@ -0,0 +1,37 @@ +package filer + +import ( + "context" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + var oldPath util.FullPath + var newEntry *Entry + if message.OldEntry != nil { + oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name) + glog.V(4).Infof("deleting %v", oldPath) + if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil { + return err + } + } + + if message.NewEntry != nil { + dir := resp.Directory + if message.NewParentPath != "" { + dir = message.NewParentPath + } + key := util.NewFullPath(dir, message.NewEntry.Name) + glog.V(4).Infof("creating %v", key) + newEntry = FromPbEntry(dir, message.NewEntry) + if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil { + return err + } + } + + return nil +} diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go new file mode 100644 index 000000000..1ef5056f4 --- /dev/null +++ b/weed/filer/mongodb/mongodb_store.go @@ -0,0 +1,229 @@ +package mongodb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" + "time" +) + +func init() { + filer.Stores = append(filer.Stores, &MongodbStore{}) +} + +type MongodbStore struct { + connect *mongo.Client + database string + collectionName string +} + +type Model struct { + Directory string `bson:"directory"` + Name string `bson:"name"` + Meta []byte `bson:"meta"` +} + +func (store *MongodbStore) GetName() string { + return "mongodb" +} + +func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) { + store.database = configuration.GetString(prefix + "database") + store.collectionName = "filemeta" + poolSize := configuration.GetInt(prefix + "option_pool_size") + return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize)) +} + +func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + opts := 
options.Client().ApplyURI(uri) + + if poolSize > 0 { + opts.SetMaxPoolSize(poolSize) + } + + client, err := mongo.Connect(ctx, opts) + if err != nil { + return err + } + + c := client.Database(store.database).Collection(store.collectionName) + err = store.indexUnique(c) + store.connect = client + return err +} + +func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error { + _, err := c.Indexes().CreateOne(context.Background(), index, opts) + return err +} + +func (store *MongodbStore) indexUnique(c *mongo.Collection) error { + opts := options.CreateIndexes().SetMaxTime(10 * time.Second) + + unique := new(bool) + *unique = true + + index := mongo.IndexModel{ + Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}}, + Options: &options.IndexOptions{ + Unique: unique, + }, + } + + return store.createIndex(c, index, opts) +} + +func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} + +func (store *MongodbStore) CommitTransaction(ctx context.Context) error { + return nil +} + +func (store *MongodbStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.UpdateEntry(ctx, entry) + +} + +func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + dir, name := entry.FullPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + if len(entry.Chunks) > 50 { + meta = util.MaybeGzipData(meta) + } + + c := store.connect.Database(store.database).Collection(store.collectionName) + + opts := options.Update().SetUpsert(true) + filter := bson.D{{"directory", dir}, {"name", name}} + update := bson.D{{"$set", bson.D{{"meta", meta}}}} + + _, err = c.UpdateOne(ctx, filter, update, opts) + + if err != nil { + return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err) + } + + return nil +} + +func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + + dir, name := fullpath.DirAndName() + var data Model + + var where = bson.M{"directory": dir, "name": name} + err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) + if err != mongo.ErrNoDocuments && err != nil { + glog.Errorf("find %s: %v", fullpath, err) + return nil, filer_pb.ErrNotFound + } + + if len(data.Meta) == 0 { + return nil, filer_pb.ErrNotFound + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + dir, name := fullpath.DirAndName() + + where := bson.M{"directory": dir, "name": name} + _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + where := bson.M{"directory": fullpath} + _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where) + if 
err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}} + if includeStartFile { + where["name"] = bson.M{ + "$gte": startFileName, + } + } + optLimit := int64(limit) + opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}} + cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts) + for cur.Next(ctx) { + var data Model + err := cur.Decode(&data) + if err != nil && err != mongo.ErrNoDocuments { + return lastFileName, err + } + + entry := &filer.Entry{ + FullPath: util.NewFullPath(string(dirPath), data.Name), + } + lastFileName = data.Name + if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + + if !eachEntryFunc(entry) { + break + } + + } + + if err := cur.Close(ctx); err != nil { + glog.V(0).Infof("list iterator close: %v", err) + } + + return lastFileName, err +} + +func (store *MongodbStore) Shutdown() { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + store.connect.Disconnect(ctx) +} diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go new file mode 100644 index 000000000..4aa9c3a33 --- /dev/null +++ b/weed/filer/mongodb/mongodb_store_kv.go @@ -0,0 +1,72 @@ +package mongodb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" +) + +func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + dir, name := genDirAndName(key) + + c := store.connect.Database(store.database).Collection(store.collectionName) + + _, err = c.InsertOne(ctx, Model{ + Directory: dir, + Name: name, + Meta: value, + }) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + dir, name := genDirAndName(key) + + var data Model + + var where = bson.M{"directory": dir, "name": name} + err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) + if err != mongo.ErrNoDocuments && err != nil { + glog.Errorf("kv get: %v", err) + return nil, filer.ErrKvNotFound + } + + if len(data.Meta) == 0 { + return nil, filer.ErrKvNotFound + } + + return data.Meta, nil +} + +func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error) { + + dir, name := genDirAndName(key) + + where := bson.M{"directory": dir, "name": name} + _, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where) + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} + +func genDirAndName(key []byte) (dir 
string, name string) { + for len(key) < 8 { + key = append(key, 0) + } + + dir = string(key[:8]) + name = string(key[8:]) + + return +} diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go new file mode 100644 index 000000000..93d3e3f9e --- /dev/null +++ b/weed/filer/mysql/mysql_sql_gen.go @@ -0,0 +1,58 @@ +package mysql + +import ( + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + _ "github.com/go-sql-driver/mysql" +) + +type SqlGenMysql struct { + CreateTableSqlTemplate string + DropTableSqlTemplate string + UpsertQueryTemplate string +} + +var ( + _ = abstract_sql.SqlGenerator(&SqlGenMysql{}) +) + +func (gen *SqlGenMysql) GetSqlInsert(tableName string) string { + if gen.UpsertQueryTemplate != "" { + return fmt.Sprintf(gen.UpsertQueryTemplate, tableName) + } else { + return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName) + } +} + +func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string { + return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlFind(tableName string) string { + return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlDelete(tableName string) string { + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string { + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string { + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName) +} + +func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string { + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? 
ORDER BY NAME ASC LIMIT ?", tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string {
+	return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName)
+}
+
+func (gen *SqlGenMysql) GetSqlDropTable(tableName string) string {
+	return fmt.Sprintf(gen.DropTableSqlTemplate, tableName)
+}
diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go
new file mode 100644
index 000000000..fbaa4d5f9
--- /dev/null
+++ b/weed/filer/mysql/mysql_store.go
@@ -0,0 +1,84 @@
+package mysql
+
+import (
+	"database/sql"
+	"fmt"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+
+	"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	_ "github.com/go-sql-driver/mysql"
+)
+
+const (
+	CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
+)
+
+func init() {
+	filer.Stores = append(filer.Stores, &MysqlStore{})
+}
+
+type MysqlStore struct {
+	abstract_sql.AbstractSqlStore
+}
+
+func (store *MysqlStore) GetName() string {
+	return "mysql"
+}
+
+func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+	return store.initialize(
+		configuration.GetString(prefix+"upsertQuery"),
+		configuration.GetBool(prefix+"enableUpsert"),
+		configuration.GetString(prefix+"username"),
+		configuration.GetString(prefix+"password"),
+		configuration.GetString(prefix+"hostname"),
+		configuration.GetInt(prefix+"port"),
+		configuration.GetString(prefix+"database"),
+		configuration.GetInt(prefix+"connection_max_idle"),
+		configuration.GetInt(prefix+"connection_max_open"),
+		configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+		configuration.GetBool(prefix+"interpolateParams"),
+	)
+}
+
+func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen,
+	maxLifetimeSeconds int, interpolateParams bool) (err error) {
+
+	store.SupportBucketTable = false
+	if !enableUpsert {
+		upsertQuery = ""
+	}
+	store.SqlGenerator = &SqlGenMysql{
+		CreateTableSqlTemplate: "",
+		DropTableSqlTemplate:   "drop table `%s`",
+		UpsertQueryTemplate:    upsertQuery,
+	}
+
+	sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+	adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "<ADAPTED>", hostname, port, database)
+	if interpolateParams {
+		sqlUrl += "&interpolateParams=true"
+		adaptedSqlUrl += "&interpolateParams=true"
+	}
+
+	var dbErr error
+	store.DB, dbErr = sql.Open("mysql", sqlUrl)
+	if dbErr != nil {
+		store.DB.Close()
+		store.DB = nil
+		return fmt.Errorf("cannot connect to %s error:%v", adaptedSqlUrl, dbErr)
+	}
+
+	store.DB.SetMaxIdleConns(maxIdle)
+	store.DB.SetMaxOpenConns(maxOpen)
+	store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+	if err = store.DB.Ping(); err != nil {
+		return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+	}
+
+	return nil
+}
diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go
new file mode 100644
index 000000000..a1f54455a
--- /dev/null
+++ b/weed/filer/mysql2/mysql2_store.go
@@ -0,0 +1,90 @@
+package mysql2
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+	"github.com/chrislusf/seaweedfs/weed/filer/mysql"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	_ "github.com/go-sql-driver/mysql"
+)
+
+const (
+	CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8"
+)
+
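+// Illustrative DSN only (user "weed", password "secret", host "localhost", port 3306 and
+// database "seaweedfs" are assumed values, not part of this commit): the pattern above
+// expands to the string handed to sql.Open in initialize below,
+//
+//	weed:secret@tcp(localhost:3306)/seaweedfs?charset=utf8
+//
+// with "&interpolateParams=true" appended when that option is enabled.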
+func init() {
+	filer.Stores = append(filer.Stores, &MysqlStore2{})
+}
+
+type MysqlStore2 struct {
+	abstract_sql.AbstractSqlStore
+}
+
+func (store *MysqlStore2) GetName() string {
+	return "mysql2"
+}
+
+func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) {
+	return store.initialize(
+		configuration.GetString(prefix+"createTable"),
+		configuration.GetString(prefix+"upsertQuery"),
+		configuration.GetBool(prefix+"enableUpsert"),
+		configuration.GetString(prefix+"username"),
+		configuration.GetString(prefix+"password"),
+		configuration.GetString(prefix+"hostname"),
+		configuration.GetInt(prefix+"port"),
+		configuration.GetString(prefix+"database"),
+		configuration.GetInt(prefix+"connection_max_idle"),
+		configuration.GetInt(prefix+"connection_max_open"),
+		configuration.GetInt(prefix+"connection_max_lifetime_seconds"),
+		configuration.GetBool(prefix+"interpolateParams"),
+	)
+}
+
+func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen,
+	maxLifetimeSeconds int, interpolateParams bool) (err error) {
+
+	store.SupportBucketTable = true
+	if !enableUpsert {
+		upsertQuery = ""
+	}
+	store.SqlGenerator = &mysql.SqlGenMysql{
+		CreateTableSqlTemplate: createTable,
+		DropTableSqlTemplate:   "drop table `%s`",
+		UpsertQueryTemplate:    upsertQuery,
+	}
+
+	sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database)
+	adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "<ADAPTED>", hostname, port, database)
+	if interpolateParams {
+		sqlUrl += "&interpolateParams=true"
+		adaptedSqlUrl += "&interpolateParams=true"
+	}
+
+	var dbErr error
+	store.DB, dbErr = sql.Open("mysql", sqlUrl)
+	if dbErr != nil {
+		store.DB.Close()
+		store.DB = nil
+		return fmt.Errorf("cannot connect to %s error:%v", adaptedSqlUrl, dbErr)
+	}
+
+	store.DB.SetMaxIdleConns(maxIdle)
+	store.DB.SetMaxOpenConns(maxOpen)
+	store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second)
+
+	if err = store.DB.Ping(); err != nil {
+		return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
+	}
+
+	if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil {
+		return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err)
+	}
+
+	return nil
+}
diff --git a/weed/filer2/permission.go b/weed/filer/permission.go
index 8a9508fbc..0d8b8292b 100644
--- a/weed/filer2/permission.go
+++ b/weed/filer/permission.go
@@ -1,4 +1,4 @@
-package filer2
+package filer
 
 func hasWritePermission(dir *Entry, entry *Entry) bool {
diff --git a/weed/filer2/postgres/README.txt b/weed/filer/postgres/README.txt
index cb0c99c63..cb0c99c63 100644
--- a/weed/filer2/postgres/README.txt
+++ b/weed/filer/postgres/README.txt
diff --git a/weed/filer/postgres/postgres_sql_gen.go b/weed/filer/postgres/postgres_sql_gen.go
new file mode 100644
index 000000000..6cee3d2da
--- /dev/null
+++ b/weed/filer/postgres/postgres_sql_gen.go
@@ -0,0 +1,58 @@
+package postgres
+
+import (
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/filer/abstract_sql"
+	_ "github.com/lib/pq"
+)
+
+type SqlGenPostgres struct {
+	CreateTableSqlTemplate string
+	DropTableSqlTemplate   string
+	UpsertQueryTemplate    string
+}
+
+var (
+	_ = abstract_sql.SqlGenerator(&SqlGenPostgres{})
+)
+
+func (gen *SqlGenPostgres) GetSqlInsert(tableName string) string {
+	if gen.UpsertQueryTemplate != "" {
+		return fmt.Sprintf(gen.UpsertQueryTemplate, tableName)
+	} else {
+		return
fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, tableName) + } +} + +func (gen *SqlGenPostgres) GetSqlUpdate(tableName string) string { + return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlFind(tableName string) string { + return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlDelete(tableName string) string { + return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(tableName string) string { + return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlListExclusive(tableName string) string { + return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlListInclusive(tableName string) string { + return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlCreateTable(tableName string) string { + return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName) +} + +func (gen *SqlGenPostgres) GetSqlDropTable(tableName string) string { + return fmt.Sprintf(gen.DropTableSqlTemplate, tableName) +} diff --git a/weed/filer/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go new file mode 100644 index 000000000..a1e16a92a --- /dev/null +++ b/weed/filer/postgres/postgres_store.go @@ -0,0 +1,93 @@ +package postgres + +import ( + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" +) + +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" +) + +func init() { + filer.Stores = append(filer.Stores, &PostgresStore{}) +} + +type PostgresStore struct { + abstract_sql.AbstractSqlStore +} + +func (store *PostgresStore) GetName() string { + return "postgres" +} + +func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"schema"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + ) +} + +func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { + + store.SupportBucketTable = false + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &SqlGenPostgres{ + CreateTableSqlTemplate: "", + DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) + if user != 
"" { + sqlUrl += " user=" + user + } + adaptedSqlUrl := sqlUrl + if password != "" { + sqlUrl += " password=" + password + adaptedSqlUrl += " password=ADAPTED" + } + if database != "" { + sqlUrl += " dbname=" + database + adaptedSqlUrl += " dbname=" + database + } + if schema != "" { + sqlUrl += " search_path=" + schema + adaptedSqlUrl += " search_path=" + schema + } + var dbErr error + store.DB, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + store.DB.Close() + store.DB = nil + return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err) + } + + store.DB.SetMaxIdleConns(maxIdle) + store.DB.SetMaxOpenConns(maxOpen) + store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) + + if err = store.DB.Ping(); err != nil { + return fmt.Errorf("connect to %s error:%v", sqlUrl, err) + } + + return nil +} diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go new file mode 100644 index 000000000..0f573d8d0 --- /dev/null +++ b/weed/filer/postgres2/postgres2_store.go @@ -0,0 +1,100 @@ +package postgres2 + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/postgres" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" +) + +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" +) + +func init() { + filer.Stores = append(filer.Stores, &PostgresStore2{}) +} + +type PostgresStore2 struct { + abstract_sql.AbstractSqlStore +} + +func (store *PostgresStore2) GetName() string { + return "postgres2" +} + +func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"createTable"), + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"schema"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + ) +} + +func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { + + store.SupportBucketTable = true + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &postgres.SqlGenPostgres{ + CreateTableSqlTemplate: createTable, + DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) + if user != "" { + sqlUrl += " user=" + user + } + adaptedSqlUrl := sqlUrl + if password != "" { + sqlUrl += " password=" + password + adaptedSqlUrl += " password=ADAPTED" + } + if database != "" { + sqlUrl += " dbname=" + database + adaptedSqlUrl += " dbname=" + database + } + if schema != "" { + sqlUrl += " search_path=" + schema + adaptedSqlUrl += " search_path=" + schema + } + var dbErr error + store.DB, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + store.DB.Close() + store.DB = nil + return fmt.Errorf("can not 
diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go new file mode 100644 index 000000000..0f573d8d0 --- /dev/null +++ b/weed/filer/postgres2/postgres2_store.go @@ -0,0 +1,100 @@ +package postgres2 + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/postgres" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" +) + +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" +) + +func init() { + filer.Stores = append(filer.Stores, &PostgresStore2{}) +} + +type PostgresStore2 struct { + abstract_sql.AbstractSqlStore +} + +func (store *PostgresStore2) GetName() string { + return "postgres2" +} + +func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"createTable"), + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"schema"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + ) +} + +func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { + + store.SupportBucketTable = true + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &postgres.SqlGenPostgres{ + CreateTableSqlTemplate: createTable, + DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) + if user != "" { + sqlUrl += " user=" + user + } + adaptedSqlUrl := sqlUrl + if password != "" { + sqlUrl += " password=" + password + adaptedSqlUrl += " password=ADAPTED" + } + if database != "" { + sqlUrl += " dbname=" + database + adaptedSqlUrl += " dbname=" + database + } + if schema != "" { + sqlUrl += " search_path=" + schema + adaptedSqlUrl += " search_path=" + schema + } + var dbErr error + store.DB, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + // sql.Open returns a nil *sql.DB on failure, so there is nothing to Close here + store.DB = nil + return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, dbErr) + } + + store.DB.SetMaxIdleConns(maxIdle) + store.DB.SetMaxOpenConns(maxOpen) + store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) + + if err = store.DB.Ping(); err != nil { + return fmt.Errorf("connect to %s error:%v", adaptedSqlUrl, err) + } + + if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil { + return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err) + } + + return nil +} diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go new file mode 100644 index 000000000..d92d526d5 --- /dev/null +++ b/weed/filer/read_write.go @@ -0,0 +1,116 @@ +package filer + +import ( + "bytes" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "io/ioutil" + "math" + "net/http" + "time" +) + +func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.SeaweedFilerClient, dir, name string, byteBuffer *bytes.Buffer) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + } + respLookupEntry, err := filer_pb.LookupEntry(filerClient, request) + if err != nil { + return err + } + if len(respLookupEntry.Entry.Content) > 0 { + _, err = byteBuffer.Write(respLookupEntry.Entry.Content) + return err + } + + return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) + +} + +func ReadContent(filerAddress string, dir, name string) ([]byte, error) { + + target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name) + + data, _, err := util.Get(target) + + return data, err +} + +func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error { + var target string + if port == 0 { + target = fmt.Sprintf("http://%s%s/%s", host, dir, name) + } else { + target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name) + } + + // set the HTTP method, url, and request body + req, err := http.NewRequest(http.MethodPut, target, byteBuffer) + if err != nil { + return err + } + + // set the request Content-Type header when provided + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer util.CloseResponse(resp) + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode >= 400 { + return fmt.Errorf("%s: %s %v", target, resp.Status, string(b)) + } + + return nil + +} + +func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, content []byte) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + }) + + if err == filer_pb.ErrNotFound { + err = filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ + Directory: dir, + Entry: &filer_pb.Entry{ + Name: name, + IsDirectory: false, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0644), + Collection: "", + Replication: "", + FileSize: uint64(len(content)), + }, + Content: content, + }, + }) + } else if err == nil { + entry := resp.Entry + entry.Content = content + entry.Attributes.Mtime = time.Now().Unix() + entry.Attributes.FileSize = uint64(len(content)) + err = filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ + Directory: dir, + Entry: entry, + }) + } + + return err +} diff --git a/weed/filer/reader_at.go
b/weed/filer/reader_at.go new file mode 100644 index 000000000..a1e989684 --- /dev/null +++ b/weed/filer/reader_at.go @@ -0,0 +1,229 @@ +package filer + +import ( + "context" + "fmt" + "io" + "math/rand" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/golang/groupcache/singleflight" +) + +type ChunkReadAt struct { + masterClient *wdclient.MasterClient + chunkViews []*ChunkView + lookupFileId wdclient.LookupFileIdFunctionType + readerLock sync.Mutex + fileSize int64 + + fetchGroup singleflight.Group + chunkCache chunk_cache.ChunkCache + lastChunkFileId string + lastChunkData []byte +} + +var _ = io.ReaderAt(&ChunkReadAt{}) +var _ = io.Closer(&ChunkReadAt{}) + +func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionType { + + vidCache := make(map[string]*filer_pb.Locations) + var vicCacheLock sync.RWMutex + return func(fileId string) (targetUrls []string, err error) { + vid := VolumeId(fileId) + vicCacheLock.RLock() + locations, found := vidCache[vid] + vicCacheLock.RUnlock() + + if !found { + util.Retry("lookup volume "+vid, func() error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + VolumeIds: []string{vid}, + }) + if err != nil { + return err + } + + locations = resp.LocationsMap[vid] + if locations == nil || len(locations.Locations) == 0 { + glog.V(0).Infof("failed to locate %s", fileId) + return fmt.Errorf("failed to locate %s", fileId) + } + vicCacheLock.Lock() + vidCache[vid] = locations + vicCacheLock.Unlock() + + return nil + }) + return err + }) + } + + if err != nil { + return nil, err + } + + for _, loc := range locations.Locations { + volumeServerAddress := filerClient.AdjustedUrl(loc) + targetUrl := fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId) + targetUrls = append(targetUrls, targetUrl) + } + + for i := len(targetUrls) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + targetUrls[i], targetUrls[j] = targetUrls[j], targetUrls[i] + } + + return + } +} + +func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt { + + return &ChunkReadAt{ + chunkViews: chunkViews, + lookupFileId: lookupFn, + chunkCache: chunkCache, + fileSize: fileSize, + } +} + +func (c *ChunkReadAt) Close() error { + c.lastChunkData = nil + c.lastChunkFileId = "" + return nil +} + +func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { + + c.readerLock.Lock() + defer c.readerLock.Unlock() + + glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + return c.doReadAt(p, offset) +} + +func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { + + startOffset, remaining := offset, int64(len(p)) + var nextChunk *ChunkView + for i, chunk := range c.chunkViews { + if remaining <= 0 { + break + } + if i+1 < len(c.chunkViews) { + nextChunk = c.chunkViews[i+1] + } else { + nextChunk = nil + } + if startOffset < chunk.LogicOffset { + gap := int(chunk.LogicOffset - startOffset) + glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap)) + n += int(min(int64(gap), remaining)) + startOffset, remaining = 
chunk.LogicOffset, remaining-int64(gap) + if remaining <= 0 { + break + } + } + // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) + chunkStart, chunkStop := max(chunk.LogicOffset, startOffset), min(chunk.LogicOffset+int64(chunk.Size), startOffset+remaining) + if chunkStart >= chunkStop { + continue + } + glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size)) + var buffer []byte + buffer, err = c.readFromWholeChunkData(chunk, nextChunk) + if err != nil { + glog.Errorf("fetching chunk %+v: %v\n", chunk, err) + return + } + bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset + copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer[bufferOffset:bufferOffset+chunkStop-chunkStart]) + n += copied + startOffset, remaining = startOffset+int64(copied), remaining-int64(copied) + } + + glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) + + if err == nil && remaining > 0 && c.fileSize > startOffset { + delta := int(min(remaining, c.fileSize-startOffset)) + glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize) + n += delta + } + + if err == nil && offset+int64(len(p)) >= c.fileSize { + err = io.EOF + } + // fmt.Printf("~~~ filled %d, err: %v\n\n", n, err) + + return + +} + +func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView, nextChunkViews ...*ChunkView) (chunkData []byte, err error) { + + if c.lastChunkFileId == chunkView.FileId { + return c.lastChunkData, nil + } + + v, doErr := c.readOneWholeChunk(chunkView) + + if doErr != nil { + return nil, doErr + } + + chunkData = v.([]byte) + + c.lastChunkData = chunkData + c.lastChunkFileId = chunkView.FileId + + for _, nextChunkView := range nextChunkViews { + if c.chunkCache != nil && nextChunkView != nil { + go c.readOneWholeChunk(nextChunkView) + } + } + + return +} + +func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, error) { + + var err error + + return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) { + + glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize) + + data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize) + if data != nil { + glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data))) + } else { + var err error + data, err = c.doFetchFullChunkData(chunkView) + if err != nil { + return data, err + } + c.chunkCache.SetChunk(chunkView.FileId, data) + } + return data, err + }) +} + +func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) { + + glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId) + + data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped) + + glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId) + + return data, err + +} diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go new file mode 100644 index 000000000..37a34f4ea --- /dev/null +++ b/weed/filer/reader_at_test.go @@ -0,0 +1,156 @@ +package filer + +import ( + "fmt" + "io" + "math" + "strconv" + "sync" + 
"testing" +) + +type mockChunkCache struct { +} + +func (m *mockChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) { + x, _ := strconv.Atoi(fileId) + data = make([]byte, minSize) + for i := 0; i < int(minSize); i++ { + data[i] = byte(x) + } + return data +} +func (m *mockChunkCache) SetChunk(fileId string, data []byte) { +} + +func TestReaderAt(t *testing.T) { + + visibles := []VisibleInterval{ + { + start: 1, + stop: 2, + fileId: "1", + chunkSize: 9, + }, + { + start: 3, + stop: 4, + fileId: "3", + chunkSize: 1, + }, + { + start: 5, + stop: 6, + fileId: "5", + chunkSize: 2, + }, + { + start: 7, + stop: 9, + fileId: "7", + chunkSize: 2, + }, + { + start: 9, + stop: 10, + fileId: "9", + chunkSize: 2, + }, + } + + readerAt := &ChunkReadAt{ + chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + lookupFileId: nil, + readerLock: sync.Mutex{}, + fileSize: 10, + chunkCache: &mockChunkCache{}, + } + + testReadAt(t, readerAt, 0, 10, 10, io.EOF) + testReadAt(t, readerAt, 0, 12, 10, io.EOF) + testReadAt(t, readerAt, 2, 8, 8, io.EOF) + testReadAt(t, readerAt, 3, 6, 6, nil) + +} + +func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expected int, expectedErr error) { + data := make([]byte, size) + n, err := readerAt.ReadAt(data, offset) + + for _, d := range data { + fmt.Printf("%x", d) + } + fmt.Println() + + if expected != n { + t.Errorf("unexpected read size: %d, expect: %d", n, expected) + } + if err != expectedErr { + t.Errorf("unexpected read error: %v, expect: %v", err, expectedErr) + } + +} + +func TestReaderAt0(t *testing.T) { + + visibles := []VisibleInterval{ + { + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }, + { + start: 7, + stop: 9, + fileId: "2", + chunkSize: 9, + }, + } + + readerAt := &ChunkReadAt{ + chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + lookupFileId: nil, + readerLock: sync.Mutex{}, + fileSize: 10, + chunkCache: &mockChunkCache{}, + } + + testReadAt(t, readerAt, 0, 10, 10, io.EOF) + testReadAt(t, readerAt, 3, 16, 7, io.EOF) + testReadAt(t, readerAt, 3, 5, 5, nil) + + testReadAt(t, readerAt, 11, 5, 0, io.EOF) + testReadAt(t, readerAt, 10, 5, 0, io.EOF) + +} + +func TestReaderAt1(t *testing.T) { + + visibles := []VisibleInterval{ + { + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }, + } + + readerAt := &ChunkReadAt{ + chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + lookupFileId: nil, + readerLock: sync.Mutex{}, + fileSize: 20, + chunkCache: &mockChunkCache{}, + } + + testReadAt(t, readerAt, 0, 20, 20, io.EOF) + testReadAt(t, readerAt, 1, 7, 7, nil) + testReadAt(t, readerAt, 0, 1, 1, nil) + testReadAt(t, readerAt, 18, 4, 2, io.EOF) + testReadAt(t, readerAt, 12, 4, 4, nil) + testReadAt(t, readerAt, 4, 20, 16, io.EOF) + testReadAt(t, readerAt, 4, 10, 10, nil) + testReadAt(t, readerAt, 1, 10, 10, nil) + +} diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go index f1ad4b35c..9572058a8 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer/redis/redis_cluster_store.go @@ -1,13 +1,13 @@ package redis import ( - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/go-redis/redis" + "github.com/go-redis/redis/v8" ) func init() { - filer2.Stores = append(filer2.Stores, &RedisClusterStore{}) + filer.Stores = append(filer.Stores, &RedisClusterStore{}) } type RedisClusterStore struct { @@ -18,16 +18,16 @@ func (store 
*RedisClusterStore) GetName() string { return "redis_cluster" } -func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) { - configuration.SetDefault("useReadOnly", true) - configuration.SetDefault("routeByLatency", true) + configuration.SetDefault(prefix+"useReadOnly", false) + configuration.SetDefault(prefix+"routeByLatency", false) return store.initialize( - configuration.GetStringSlice("addresses"), - configuration.GetString("password"), - configuration.GetBool("useReadOnly"), - configuration.GetBool("routeByLatency"), + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), ) } diff --git a/weed/filer2/redis/redis_store.go b/weed/filer/redis/redis_store.go index c56fa014c..665352a63 100644 --- a/weed/filer2/redis/redis_store.go +++ b/weed/filer/redis/redis_store.go @@ -1,13 +1,13 @@ package redis import ( - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/go-redis/redis" + "github.com/go-redis/redis/v8" ) func init() { - filer2.Stores = append(filer2.Stores, &RedisStore{}) + filer.Stores = append(filer.Stores, &RedisStore{}) } type RedisStore struct { @@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string { return "redis" } -func (store *RedisStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("address"), - configuration.GetString("password"), - configuration.GetInt("database"), + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), ) } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go index 62257e91e..30d11a7f4 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer/redis/universal_redis_store.go @@ -3,12 +3,16 @@ package redis import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/go-redis/redis" "sort" "strings" "time" + + "github.com/go-redis/redis/v8" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -29,14 +33,18 @@ func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error return nil } -func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { value, err := entry.EncodeAttributesAndChunks() if err != nil { return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - _, err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result() + if len(entry.Chunks) > 50 { + value = util.MaybeGzipData(value) + } + + _, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result() if err != nil { return fmt.Errorf("persisting %s : %v", entry.FullPath, err) @@ -44,7 +52,7 @@ func (store *UniversalRedisStore) 
InsertEntry(ctx context.Context, entry *filer2 dir, name := entry.FullPath.DirAndName() if name != "" { - _, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result() + _, err = store.Client.SAdd(ctx, genDirectoryListKey(dir), name).Result() if err != nil { return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) } @@ -53,26 +61,26 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2 return nil } -func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { - data, err := store.Client.Get(string(fullpath)).Result() + data, err := store.Client.Get(ctx, string(fullpath)).Result() if err == redis.Nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", fullpath, err) } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks([]byte(data)) + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -80,9 +88,9 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2 return entry, nil } -func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { - _, err = store.Client.Del(string(fullpath)).Result() + _, err = store.Client.Del(ctx, string(fullpath)).Result() if err != nil { return fmt.Errorf("delete %s : %v", fullpath, err) @@ -90,7 +98,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file dir, name := fullpath.DirAndName() if name != "" { - _, err = store.Client.SRem(genDirectoryListKey(dir), name).Result() + _, err = store.Client.SRem(ctx, genDirectoryListKey(dir), name).Result() if err != nil { return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) } @@ -99,16 +107,16 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file return nil } -func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { - members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + members, err := store.Client.SMembers(ctx, genDirectoryListKey(string(fullpath))).Result() if err != nil { return fmt.Errorf("delete folder %s : %v", fullpath, err) } for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) - _, err = store.Client.Del(string(path)).Result() + path := util.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(ctx, string(path)).Result() if err != nil { return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) } @@ -117,12 +125,16 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full return nil } -func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, 
fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + dirListKey := genDirectoryListKey(string(dirPath)) + members, err := store.Client.SMembers(ctx, dirListKey).Result() if err != nil { - return nil, fmt.Errorf("list %s : %v", fullpath, err) + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) } // skip @@ -131,7 +143,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full for _, m := range members { if strings.Compare(m, startFileName) >= 0 { if m == startFileName { - if inclusive { + if includeStartFile { t = append(t, m) } } else { @@ -148,24 +160,41 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full }) // limit - if limit < len(members) { + if limit < int64(len(members)) { members = members[:limit] } // fetch entry meta for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) + path := util.NewFullPath(string(dirPath), fileName) entry, err := store.FindEntry(ctx, path) + lastFileName = fileName if err != nil { glog.V(0).Infof("list %s : %v", path, err) + if err == filer_pb.ErrNotFound { + continue + } } else { - entries = append(entries, entry) + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(ctx, string(path)).Result() + store.Client.SRem(ctx, dirListKey, fileName).Result() + continue + } + } + if !eachEntryFunc(entry) { + break + } } } - return entries, err + return lastFileName, err } func genDirectoryListKey(dir string) (dirList string) { return dir + DIR_LIST_MARKER } + +func (store *UniversalRedisStore) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go new file mode 100644 index 000000000..ad6e389ed --- /dev/null +++ b/weed/filer/redis/universal_redis_store_kv.go @@ -0,0 +1,42 @@ +package redis + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" +) + +func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + _, err = store.Client.Set(ctx, string(key), value, 0).Result() + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + data, err := store.Client.Get(ctx, string(key)).Result() + + if err == redis.Nil { + return nil, filer.ErrKvNotFound + } + + return []byte(data), err +} + +func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) { + + _, err = store.Client.Del(ctx, string(key)).Result() + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/redis2/redis_cluster_store.go 
b/weed/filer/redis2/redis_cluster_store.go new file mode 100644 index 000000000..22d09da25 --- /dev/null +++ b/weed/filer/redis2/redis_cluster_store.go @@ -0,0 +1,44 @@ +package redis2 + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" +) + +func init() { + filer.Stores = append(filer.Stores, &RedisCluster2Store{}) +} + +type RedisCluster2Store struct { + UniversalRedis2Store +} + +func (store *RedisCluster2Store) GetName() string { + return "redis_cluster2" +} + +func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) { + + configuration.SetDefault(prefix+"useReadOnly", false) + configuration.SetDefault(prefix+"routeByLatency", false) + + return store.initialize( + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), + configuration.GetStringSlice(prefix+"superLargeDirectories"), + ) +} + +func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool, superLargeDirectories []string) (err error) { + store.Client = redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: addresses, + Password: password, + ReadOnly: readOnly, + RouteByLatency: routeByLatency, + }) + store.loadSuperLargeDirectories(superLargeDirectories) + return +} diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go new file mode 100644 index 000000000..8eb97e374 --- /dev/null +++ b/weed/filer/redis2/redis_store.go @@ -0,0 +1,38 @@ +package redis2 + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" +) + +func init() { + filer.Stores = append(filer.Stores, &Redis2Store{}) +} + +type Redis2Store struct { + UniversalRedis2Store +} + +func (store *Redis2Store) GetName() string { + return "redis2" +} + +func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), + configuration.GetStringSlice(prefix+"superLargeDirectories"), + ) +} + +func (store *Redis2Store) initialize(hostPort string, password string, database int, superLargeDirectories []string) (err error) { + store.Client = redis.NewClient(&redis.Options{ + Addr: hostPort, + Password: password, + DB: database, + }) + store.loadSuperLargeDirectories(superLargeDirectories) + return +} diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go new file mode 100644 index 000000000..aab3d1f4a --- /dev/null +++ b/weed/filer/redis2/universal_redis_store.go @@ -0,0 +1,204 @@ +package redis2 + +import ( + "context" + "fmt" + "time" + + "github.com/go-redis/redis/v8" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DIR_LIST_MARKER = "\x00" +) + +type UniversalRedis2Store struct { + Client redis.UniversalClient + superLargeDirectoryHash map[string]bool +} + +func (store *UniversalRedis2Store) isSuperLargeDirectory(dir string) (isSuperLargeDirectory bool) { + _, isSuperLargeDirectory = store.superLargeDirectoryHash[dir] + return +} + +func (store 
*UniversalRedis2Store) loadSuperLargeDirectories(superLargeDirectories []string) { + // set directory hash + store.superLargeDirectoryHash = make(map[string]bool) + for _, dir := range superLargeDirectories { + store.superLargeDirectoryHash[dir] = true + } +} + +func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > 50 { + value = util.MaybeGzipData(value) + } + + if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + dir, name := entry.FullPath.DirAndName() + if store.isSuperLargeDirectory(dir) { + return nil + } + + if name != "" { + if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil { + return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + + data, err := store.Client.Get(ctx, string(fullpath)).Result() + if err == redis.Nil { + return nil, filer_pb.ErrNotFound + } + + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { + + _, err = store.Client.Del(ctx, genDirectoryListKey(string(fullpath))).Result() + if err != nil { + return fmt.Errorf("delete dir list %s : %v", fullpath, err) + } + + _, err = store.Client.Del(ctx, string(fullpath)).Result() + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + dir, name := fullpath.DirAndName() + if store.isSuperLargeDirectory(dir) { + return nil + } + if name != "" { + _, err = store.Client.ZRem(ctx, genDirectoryListKey(dir), name).Result() + if err != nil { + return fmt.Errorf("DeleteEntry %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { + + if store.isSuperLargeDirectory(string(fullpath)) { + return nil + } + + members, err := store.Client.ZRange(ctx, genDirectoryListKey(string(fullpath)), 0, -1).Result() + if err != nil { + return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err) + } + + for _, fileName := range members { + path := util.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(ctx, string(path)).Result() + if err != nil { + return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func 
(store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + dirListKey := genDirectoryListKey(string(dirPath)) + start := int64(0) + if startFileName != "" { + start, _ = store.Client.ZRank(ctx, dirListKey, startFileName).Result() + if !includeStartFile { + start++ + } + } + members, err := store.Client.ZRange(ctx, dirListKey, start, start+int64(limit)-1).Result() + if err != nil { + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) + } + + // fetch entry meta + for _, fileName := range members { + path := util.NewFullPath(string(dirPath), fileName) + entry, err := store.FindEntry(ctx, path) + lastFileName = fileName + if err != nil { + glog.V(0).Infof("list %s : %v", path, err) + if err == filer_pb.ErrNotFound { + continue + } + } else { + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(ctx, string(path)).Result() + store.Client.ZRem(ctx, dirListKey, fileName).Result() + continue + } + } + if !eachEntryFunc(entry) { + break + } + } + } + + return lastFileName, err +} + +func genDirectoryListKey(dir string) (dirList string) { + return dir + DIR_LIST_MARKER +} + +func (store *UniversalRedis2Store) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go new file mode 100644 index 000000000..bde994dc9 --- /dev/null +++ b/weed/filer/redis2/universal_redis_store_kv.go @@ -0,0 +1,42 @@ +package redis2 + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" +) + +func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + _, err = store.Client.Set(ctx, string(key), value, 0).Result() + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + data, err := store.Client.Get(ctx, string(key)).Result() + + if err == redis.Nil { + return nil, filer.ErrKvNotFound + } + + return []byte(data), err +} + +func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) { + + _, err = store.Client.Del(ctx, string(key)).Result() + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/rocksdb/README.md b/weed/filer/rocksdb/README.md new file mode 100644 index 000000000..6bae6d34e --- /dev/null +++ b/weed/filer/rocksdb/README.md @@ -0,0 +1,41 @@ +# Prepare the compilation environment on linux +- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test +- sudo apt-get update -qq +- sudo apt-get install gcc-6 g++-6 libsnappy-dev zlib1g-dev libbz2-dev -qq +- export CXX="g++-6" CC="gcc-6" + +- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags2_2.0-1.1ubuntu1_amd64.deb +- sudo dpkg -i libgflags2_2.0-1.1ubuntu1_amd64.deb +- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags-dev_2.0-1.1ubuntu1_amd64.deb +- sudo 
dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb + +# Prepare the compilation environment on mac os +``` +brew install snappy +``` + +# install rocksdb: +``` + export ROCKSDB_HOME=/Users/chris/dev/rocksdb + + git clone https://github.com/facebook/rocksdb.git $ROCKSDB_HOME + pushd $ROCKSDB_HOME + make clean + make install-static + popd +``` + +# install gorocksdb + +``` +export CGO_CFLAGS="-I$ROCKSDB_HOME/include" +export CGO_LDFLAGS="-L$ROCKSDB_HOME -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" + +go get github.com/tecbot/gorocksdb +``` +# compile with rocksdb + +``` +cd ~/go/src/github.com/chrislusf/seaweedfs/weed +go install -tags rocksdb +``` diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go new file mode 100644 index 000000000..379a18c62 --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_store.go @@ -0,0 +1,304 @@ +// +build rocksdb + +package rocksdb + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "io" + "os" + + "github.com/tecbot/gorocksdb" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + filer.Stores = append(filer.Stores, &RocksDBStore{}) +} + +type options struct { + opt *gorocksdb.Options + ro *gorocksdb.ReadOptions + wo *gorocksdb.WriteOptions +} + +func (opt *options) init() { + opt.opt = gorocksdb.NewDefaultOptions() + opt.ro = gorocksdb.NewDefaultReadOptions() + opt.wo = gorocksdb.NewDefaultWriteOptions() +} + +func (opt *options) close() { + opt.opt.Destroy() + opt.ro.Destroy() + opt.wo.Destroy() +} + +type RocksDBStore struct { + path string + db *gorocksdb.DB + options +} + +func (store *RocksDBStore) GetName() string { + return "rocksdb" +} + +func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") + return store.initialize(dir) +} + +func (store *RocksDBStore) initialize(dir string) (err error) { + glog.Infof("filer store rocksdb dir: %s", dir) + os.MkdirAll(dir, 0755) + if err := weed_util.TestFolderWritable(dir); err != nil { + return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) + } + store.options.init() + store.opt.SetCreateIfMissing(true) + // reduce write amplification + // also avoid expired data stored in highest level never get compacted + store.opt.SetLevelCompactionDynamicLevelBytes(true) + store.opt.SetCompactionFilter(NewTTLFilter()) + // store.opt.SetMaxBackgroundCompactions(2) + + store.db, err = gorocksdb.OpenDb(store.opt, dir) + + return +} + +func (store *RocksDBStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *RocksDBStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *RocksDBStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + dir, name := entry.DirAndName() + key := genKey(dir, name) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + err = store.db.Put(store.wo, key, value) + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) + + return nil +} + +func (store *RocksDBStore) UpdateEntry(ctx context.Context, entry 
*filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { + dir, name := fullpath.DirAndName() + key := genKey(dir, name) + data, err := store.db.Get(store.ro, key) + + if data == nil { + return nil, filer_pb.ErrNotFound + } + defer data.Free() + + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(data.Data()) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) + + return entry, nil +} + +func (store *RocksDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + dir, name := fullpath.DirAndName() + key := genKey(dir, name) + + err = store.db.Delete(store.wo, key) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + batch := gorocksdb.NewWriteBatch() + defer batch.Destroy() + + ro := gorocksdb.NewDefaultReadOptions() + defer ro.Destroy() + ro.SetFillCache(false) + + iter := store.db.NewIterator(ro) + defer iter.Close() + err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool { + batch.Delete(key) + return true + }) + if err != nil { + return fmt.Errorf("delete list %s : %v", fullpath, err) + } + + err = store.db.Write(store.wo, batch) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) { + + if len(lastKey) == 0 { + iter.Seek(prefix) + } else { + iter.Seek(lastKey) + if !includeLastKey { + if iter.Valid() { + if bytes.Equal(iter.Key().Data(), lastKey) { + iter.Next() + } + } + } + } + + i := int64(0) + for ; iter.Valid(); iter.Next() { + + if limit > 0 { + i++ + if i > limit { + break + } + } + + key := iter.Key().Data() + + if !bytes.HasPrefix(key, prefix) { + break + } + + ret := fn(key, iter.Value().Data()) + + if !ret { + break + } + + } + + if err := iter.Err(); err != nil { + return fmt.Errorf("prefix scan iterator: %v", err) + } + return nil +} + +func (store *RocksDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) + } + + ro := gorocksdb.NewDefaultReadOptions() + defer ro.Destroy() + ro.SetFillCache(false) + + iter := store.db.NewIterator(ro) + defer iter.Close() + err = enumerate(iter, directoryPrefix, lastFileStart, 
includeStartFile, limit, func(key, value []byte) bool { + fileName := getNameFromKey(key) + if fileName == "" { + return true + } + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), + } + lastFileName = fileName + + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) + if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + return false + } + if !eachEntryFunc(entry) { + return false + } + return true + }) + if err != nil { + return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err) + } + + return lastFileName, err +} + +func genKey(dirPath, fileName string) (key []byte) { + key = hashToBytes(dirPath) + key = append(key, []byte(fileName)...) + return key +} + +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { + keyPrefix = hashToBytes(string(fullpath)) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) + } + return keyPrefix +} + +func getNameFromKey(key []byte) string { + + return string(key[md5.Size:]) + +} + +// hash directory, and use last byte for partitioning +func hashToBytes(dir string) []byte { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + return b +} + +func (store *RocksDBStore) Shutdown() { + store.db.Close() + store.options.close() +} diff --git a/weed/filer/rocksdb/rocksdb_store_kv.go b/weed/filer/rocksdb/rocksdb_store_kv.go new file mode 100644 index 000000000..cf1214d5b --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_store_kv.go @@ -0,0 +1,47 @@ +// +build rocksdb + +package rocksdb + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" +) + +func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + err = store.db.Put(store.wo, key, value) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + value, err = store.db.GetBytes(store.ro, key) + + if value == nil { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) { + + err = store.db.Delete(store.wo, key) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go new file mode 100644 index 000000000..f6e755b4b --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_store_test.go @@ -0,0 +1,117 @@ +// +build rocksdb + +package rocksdb + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestCreateAndFind(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + defer os.RemoveAll(dir) + store := &RocksDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() + + entry1 := &filer.Entry{ + FullPath: fullpath, + Attr: filer.Attr{ + Mode: 0440, + Uid: 1234, + Gid: 5678, + }, + } + + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { + t.Errorf("create entry %v: %v", entry1.FullPath, err) + return + } + 
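+ // the entry created above should now be retrievable by its full path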
+ entry, err := testFiler.FindEntry(ctx, fullpath) + + if err != nil { + t.Errorf("find entry: %v", err) + return + } + + if entry.FullPath != entry1.FullPath { + t.Errorf("find wrong entry: %v", entry.FullPath) + return + } + + // checking one upper directory + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + + // checking one upper directory + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func TestEmptyRoot(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + defer os.RemoveAll(dir) + store := &RocksDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + // checking one upper directory + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if err != nil { + t.Errorf("list entries: %v", err) + return + } + if len(entries) != 0 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func BenchmarkInsertEntry(b *testing.B) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") + defer os.RemoveAll(dir) + store := &RocksDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + entry := &filer.Entry{ + FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)), + Attr: filer.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + }, + } + store.InsertEntry(ctx, entry) + } +} diff --git a/weed/filer/rocksdb/rocksdb_ttl.go b/weed/filer/rocksdb/rocksdb_ttl.go new file mode 100644 index 000000000..faed22310 --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_ttl.go @@ -0,0 +1,40 @@ +//+build rocksdb + +package rocksdb + +import ( + "time" + + "github.com/tecbot/gorocksdb" + + "github.com/chrislusf/seaweedfs/weed/filer" +) + +type TTLFilter struct { + skipLevel0 bool +} + +func NewTTLFilter() gorocksdb.CompactionFilter { + return &TTLFilter{ + skipLevel0: true, + } +} + +func (t *TTLFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) { + // decode could be slow, causing write stall + // level >0 sst can run compaction in parallel + if !t.skipLevel0 || level > 0 { + entry := filer.Entry{} + if err := entry.DecodeAttributesAndChunks(val); err == nil { + if entry.TtlSec > 0 && + entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) { + return true, nil + } + } + } + return false, val +} + +func (t *TTLFilter) Name() string { + return "TTLFilter" +} diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go new file mode 100644 index 000000000..92387fb09 --- /dev/null +++ b/weed/filer/s3iam_conf.go @@ -0,0 +1,25 @@ +package filer + +import ( + "bytes" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/golang/protobuf/jsonpb" + "io" +) + +func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfiguration) error { + if err := jsonpb.Unmarshal(bytes.NewBuffer(content), config); err != nil { + return err + } + return nil +} + +func S3ConfigurationToText(writer io.Writer, config *iam_pb.S3ApiConfiguration) error { + + m := jsonpb.Marshaler{ + EmitDefaults: 
false, + Indent: " ", + } + + return m.Marshal(writer, config) +} diff --git a/weed/filer/s3iam_conf_test.go b/weed/filer/s3iam_conf_test.go new file mode 100644 index 000000000..65cc49840 --- /dev/null +++ b/weed/filer/s3iam_conf_test.go @@ -0,0 +1,57 @@ +package filer + +import ( + "bytes" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + + "github.com/stretchr/testify/assert" +) + +func TestS3Conf(t *testing.T) { + s3Conf := &iam_pb.S3ApiConfiguration{ + Identities: []*iam_pb.Identity{ + { + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + }, + { + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_TAGGING, + ACTION_LIST, + }, + }, + }, + } + var buf bytes.Buffer + err := S3ConfigurationToText(&buf, s3Conf) + assert.Equal(t, err, nil) + s3ConfSaved := &iam_pb.S3ApiConfiguration{} + err = ParseS3ConfigurationFromBytes(buf.Bytes(), s3ConfSaved) + assert.Equal(t, err, nil) + + assert.Equal(t, "some_name", s3ConfSaved.Identities[0].Name) + assert.Equal(t, "some_read_only_user", s3ConfSaved.Identities[1].Name) + assert.Equal(t, "some_access_key1", s3ConfSaved.Identities[0].Credentials[0].AccessKey) + assert.Equal(t, "some_secret_key2", s3ConfSaved.Identities[1].Credentials[0].SecretKey) +} diff --git a/weed/filer/stream.go b/weed/filer/stream.go new file mode 100644 index 000000000..661a210ea --- /dev/null +++ b/weed/filer/stream.go @@ -0,0 +1,245 @@ +package filer + +import ( + "bytes" + "fmt" + "golang.org/x/sync/errgroup" + "io" + "math" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error { + + glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks) + chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size) + + fileId2Url := make(map[string][]string) + + for _, chunkView := range chunkViews { + + urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } else if len(urlStrings) == 0 { + glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + } + fileId2Url[chunkView.FileId] = urlStrings + } + + if isCheck { + // Pre-check all chunkViews urls + gErr := new(errgroup.Group) + CheckAllChunkViews(chunkViews, &fileId2Url, gErr) + if err := gErr.Wait(); err != nil { + glog.Errorf("check all chunks: %v", err) + return fmt.Errorf("check all chunks: %v", err) + } + return nil + } + + for _, chunkView := range chunkViews { + + urlStrings := fileId2Url[chunkView.FileId] + data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + if err != nil { + glog.Errorf("read chunk: %v", err) + return fmt.Errorf("read chunk: %v", 
err) + } + + _, err = w.Write(data) + if err != nil { + glog.Errorf("write chunk: %v", err) + return fmt.Errorf("write chunk: %v", err) + } + } + + return nil + +} + +func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) { + for _, chunkView := range chunkViews { + chunkView := chunkView // pin the loop variable so each goroutine checks its own chunk + urlStrings := (*fileId2Url)[chunkView.FileId] + glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings) + gErr.Go(func() error { + _, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + return err + }) + } +} + +// ---------------- ReadAllReader ---------------------------------- + +func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) { + + buffer := bytes.Buffer{} + + lookupFileIdFn := func(fileId string) (targetUrls []string, err error) { + return masterClient.LookupFileId(fileId) + } + + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + + for _, chunkView := range chunkViews { + urlStrings, err := lookupFileIdFn(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return nil, err + } + + data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + if err != nil { + return nil, err + } + buffer.Write(data) + } + return buffer.Bytes(), nil +} + +// ---------------- ChunkStreamReader ---------------------------------- +type ChunkStreamReader struct { + chunkViews []*ChunkView + logicOffset int64 + buffer []byte + bufferOffset int64 + bufferPos int + chunkIndex int + lookupFileId wdclient.LookupFileIdFunctionType +} + +var _ = io.ReadSeeker(&ChunkStreamReader{}) + +func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + lookupFileIdFn := func(fileId string) (targetUrl []string, err error) { + return masterClient.LookupFileId(fileId) + } + + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: lookupFileIdFn, + } +} + +func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + lookupFileIdFn := LookupFn(filerClient) + + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: lookupFileIdFn, + } +} + +func (c *ChunkStreamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if c.isBufferEmpty() { + if c.chunkIndex >= len(c.chunkViews) { + return n, io.EOF + } + chunkView := c.chunkViews[c.chunkIndex] + if err = c.fetchChunkToBuffer(chunkView); err != nil { + return + } + c.chunkIndex++ + } + t := copy(p[n:], c.buffer[c.bufferPos:]) + c.bufferPos += t + n += t + } + return +} + +func (c *ChunkStreamReader) isBufferEmpty() bool { + return len(c.buffer) <= c.bufferPos +} + +func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { + + var totalSize int64 + for _, chunk := range c.chunkViews { + totalSize += int64(chunk.Size) + } + + var err error + switch whence { + case io.SeekStart: + case io.SeekCurrent: + offset += c.bufferOffset + int64(c.bufferPos) + case io.SeekEnd: + offset = totalSize + offset + } + if offset > totalSize { + err = io.ErrUnexpectedEOF + } + + for i, chunk := range c.chunkViews { + if chunk.LogicOffset <= offset && offset <
chunk.LogicOffset+int64(chunk.Size) { + if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset { + c.fetchChunkToBuffer(chunk) + c.chunkIndex = i + 1 + break + } + } + } + c.bufferPos = int(offset - c.bufferOffset) + + return offset, err + +} + +func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { + urlStrings, err := c.lookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } + var buffer bytes.Buffer + var shouldRetry bool + for _, urlString := range urlStrings { + shouldRetry, err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + buffer.Write(data) + }) + if !shouldRetry { + break + } + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + buffer.Reset() + } else { + break + } + } + if err != nil { + return err + } + c.buffer = buffer.Bytes() + c.bufferPos = 0 + c.bufferOffset = chunkView.LogicOffset + + // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + + return nil +} + +func (c *ChunkStreamReader) Close() { + // TODO try to release and reuse buffer +} + +func VolumeId(fileId string) string { + lastCommaIndex := strings.LastIndex(fileId, ",") + if lastCommaIndex > 0 { + return fileId[:lastCommaIndex] + } + return fileId +} diff --git a/weed/filer/topics.go b/weed/filer/topics.go new file mode 100644 index 000000000..3a2fde8c4 --- /dev/null +++ b/weed/filer/topics.go @@ -0,0 +1,6 @@ +package filer + +const ( + TopicsDir = "/topics" + SystemLogDir = TopicsDir + "/.system/log" +) diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go deleted file mode 100644 index d512467c7..000000000 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ /dev/null @@ -1,184 +0,0 @@ -package abstract_sql - -import ( - "context" - "database/sql" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" -) - -type AbstractSqlStore struct { - DB *sql.DB - SqlInsert string - SqlUpdate string - SqlFind string - SqlDelete string - SqlDeleteFolderChildren string - SqlListExclusive string - SqlListInclusive string -} - -type TxOrDB interface { - ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) - QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) -} - -func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { - tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ - Isolation: sql.LevelReadCommitted, - ReadOnly: false, - }) - if err != nil { - return ctx, err - } - - return context.WithValue(ctx, "tx", tx), nil -} -func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { - if tx, ok := ctx.Value("tx").(*sql.Tx); ok { - return tx.Commit() - } - return nil -} -func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { - if tx, ok := ctx.Value("tx").(*sql.Tx); ok { - return tx.Rollback() - } - return nil -} - -func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB { - if tx, ok := ctx.Value("tx").(*sql.Tx); ok { - return tx - } - return store.DB -} - -func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err 
error) { - - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) - if err != nil { - return fmt.Errorf("insert %s: %s", entry.FullPath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("insert %s but no rows affected: %s", entry.FullPath, err) - } - return nil -} - -func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) - if err != nil { - return fmt.Errorf("update %s: %s", entry.FullPath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err) - } - return nil -} - -func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { - - dir, name := fullpath.DirAndName() - row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) - var data []byte - if err := row.Scan(&data); err != nil { - return nil, filer2.ErrNotFound - } - - entry := &filer2.Entry{ - FullPath: fullpath, - } - if err := entry.DecodeAttributesAndChunks(data); err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - return entry, nil -} - -func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { - - dir, name := fullpath.DirAndName() - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) - if err != nil { - return fmt.Errorf("delete %s: %s", fullpath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err) - } - - return nil -} - -func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, hashToLong(string(fullpath)), fullpath) - if err != nil { - return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err) - } - - return nil -} - -func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - - sqlText := store.SqlListExclusive - if inclusive { - sqlText = store.SqlListInclusive - } - - rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) - if err != nil { - return nil, fmt.Errorf("list %s : %v", fullpath, err) - } - defer rows.Close() - - for rows.Next() { - var name string - var data []byte - if err = rows.Scan(&name, &data); err != nil { - glog.V(0).Infof("scan %s : %v", fullpath, err) - return nil, fmt.Errorf("scan %s: %v", fullpath, err) - } - - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), - } - if err = entry.DecodeAttributesAndChunks(data); err != nil { - glog.V(0).Infof("scan decode %s : %v", 
entry.FullPath, err) - return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) - } - - entries = append(entries, entry) - } - - return entries, nil -} diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go deleted file mode 100644 index 5c982c537..000000000 --- a/weed/filer2/abstract_sql/hashing.go +++ /dev/null @@ -1,32 +0,0 @@ -package abstract_sql - -import ( - "crypto/md5" - "io" -) - -// returns a 64 bit big int -func hashToLong(dir string) (v int64) { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - v += int64(b[0]) - v <<= 8 - v += int64(b[1]) - v <<= 8 - v += int64(b[2]) - v <<= 8 - v += int64(b[3]) - v <<= 8 - v += int64(b[4]) - v <<= 8 - v += int64(b[5]) - v <<= 8 - v += int64(b[6]) - v <<= 8 - v += int64(b[7]) - - return -} diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go deleted file mode 100644 index dcaab8bc4..000000000 --- a/weed/filer2/cassandra/cassandra_store.go +++ /dev/null @@ -1,153 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gocql/gocql" -) - -func init() { - filer2.Stores = append(filer2.Stores, &CassandraStore{}) -} - -type CassandraStore struct { - cluster *gocql.ClusterConfig - session *gocql.Session -} - -func (store *CassandraStore) GetName() string { - return "cassandra" -} - -func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) { - return store.initialize( - configuration.GetString("keyspace"), - configuration.GetStringSlice("hosts"), - ) -} - -func (store *CassandraStore) initialize(keyspace string, hosts []string) (err error) { - store.cluster = gocql.NewCluster(hosts...) - store.cluster.Keyspace = keyspace - store.cluster.Consistency = gocql.LocalQuorum - store.session, err = store.cluster.CreateSession() - if err != nil { - glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) - } - return -} - -func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return ctx, nil -} -func (store *CassandraStore) CommitTransaction(ctx context.Context) error { - return nil -} -func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { - return nil -} - -func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - if err := store.session.Query( - "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ", - dir, name, meta, entry.TtlSec).Exec(); err != nil { - return fmt.Errorf("insert %s: %s", entry.FullPath, err) - } - - return nil -} - -func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - return store.InsertEntry(ctx, entry) -} - -func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - - dir, name := fullpath.DirAndName() - var data []byte - if err := store.session.Query( - "SELECT meta FROM filemeta WHERE directory=? 
AND name=?", - dir, name).Consistency(gocql.One).Scan(&data); err != nil { - if err != gocql.ErrNotFound { - return nil, filer2.ErrNotFound - } - } - - if len(data) == 0 { - return nil, filer2.ErrNotFound - } - - entry = &filer2.Entry{ - FullPath: fullpath, - } - err = entry.DecodeAttributesAndChunks(data) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - return entry, nil -} - -func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { - - dir, name := fullpath.DirAndName() - - if err := store.session.Query( - "DELETE FROM filemeta WHERE directory=? AND name=?", - dir, name).Exec(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { - - if err := store.session.Query( - "DELETE FROM filemeta WHERE directory=?", - fullpath).Exec(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - - cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" - if inclusive { - cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?" - } - - var data []byte - var name string - iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter() - for iter.Scan(&name, &data) { - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), - } - if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil { - err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) - break - } - entries = append(entries, entry) - } - if err := iter.Close(); err != nil { - glog.V(0).Infof("list iterator close: %v", err) - } - - return entries, err -} diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go deleted file mode 100644 index 7b05b53dc..000000000 --- a/weed/filer2/configuration.go +++ /dev/null @@ -1,51 +0,0 @@ -package filer2 - -import ( - "os" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/spf13/viper" -) - -var ( - Stores []FilerStore -) - -func (f *Filer) LoadConfiguration(config *viper.Viper) { - - validateOneEnabledStore(config) - - for _, store := range Stores { - if config.GetBool(store.GetName() + ".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize store for %s: %+v", - store.GetName(), err) - } - f.SetStore(store) - glog.V(0).Infof("Configure filer for %s", store.GetName()) - return - } - } - - println() - println("Supported filer stores are:") - for _, store := range Stores { - println(" " + store.GetName()) - } - - os.Exit(-1) -} - -func validateOneEnabledStore(config *viper.Viper) { - enabledStore := "" - for _, store := range Stores { - if config.GetBool(store.GetName() + ".enabled") { - if enabledStore == "" { - enabledStore = store.GetName() - } else { - glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) - } - } - } -} diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go deleted file mode 100644 index c901927bb..000000000 --- a/weed/filer2/entry.go +++ /dev/null @@ -1,73 +0,0 @@ -package filer2 - -import ( - "os" - "time" - - 
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -type Attr struct { - Mtime time.Time // time of last modification - Crtime time.Time // time of creation (OS X only) - Mode os.FileMode // file mode - Uid uint32 // owner uid - Gid uint32 // group gid - Mime string // mime type - Replication string // replication - Collection string // collection name - TtlSec int32 // ttl in seconds - UserName string - GroupNames []string - SymlinkTarget string -} - -func (attr Attr) IsDirectory() bool { - return attr.Mode&os.ModeDir > 0 -} - -type Entry struct { - FullPath - - Attr - Extended map[string][]byte - - // the following is for files - Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` -} - -func (entry *Entry) Size() uint64 { - return TotalSize(entry.Chunks) -} - -func (entry *Entry) Timestamp() time.Time { - if entry.IsDirectory() { - return entry.Crtime - } else { - return entry.Mtime - } -} - -func (entry *Entry) ToProtoEntry() *filer_pb.Entry { - if entry == nil { - return nil - } - return &filer_pb.Entry{ - Name: entry.FullPath.Name(), - IsDirectory: entry.IsDirectory(), - Attributes: EntryAttributeToPb(entry), - Chunks: entry.Chunks, - Extended: entry.Extended, - } -} - -func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { - if entry == nil { - return nil - } - dir, _ := entry.FullPath.DirAndName() - return &filer_pb.FullEntry{ - Dir: dir, - Entry: entry.ToProtoEntry(), - } -} diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go deleted file mode 100644 index b5876df82..000000000 --- a/weed/filer2/filechunks.go +++ /dev/null @@ -1,228 +0,0 @@ -package filer2 - -import ( - "fmt" - "hash/fnv" - "sort" - "sync" - - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) { - for _, c := range chunks { - t := uint64(c.Offset + int64(c.Size)) - if size < t { - size = t - } - } - return -} - -func ETag(chunks []*filer_pb.FileChunk) (etag string) { - if len(chunks) == 1 { - return chunks[0].ETag - } - - h := fnv.New32a() - for _, c := range chunks { - h.Write([]byte(c.ETag)) - } - return fmt.Sprintf("%x", h.Sum32()) -} - -func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) { - - visibles := NonOverlappingVisibleIntervals(chunks) - - fileIds := make(map[string]bool) - for _, interval := range visibles { - fileIds[interval.fileId] = true - } - for _, chunk := range chunks { - if _, found := fileIds[chunk.GetFileIdString()]; found { - compacted = append(compacted, chunk) - } else { - garbage = append(garbage, chunk) - } - } - - return -} - -func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { - - fileIds := make(map[string]bool) - for _, interval := range bs { - fileIds[interval.GetFileIdString()] = true - } - for _, chunk := range as { - if _, found := fileIds[chunk.GetFileIdString()]; !found { - delta = append(delta, chunk) - } - } - - return -} - -type ChunkView struct { - FileId string - Offset int64 - Size uint64 - LogicOffset int64 - IsFullChunk bool -} - -func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { - - visibles := NonOverlappingVisibleIntervals(chunks) - - return ViewFromVisibleIntervals(visibles, offset, size) - -} - -func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) { - - stop := offset + int64(size) - - for _, chunk := range visibles { - if chunk.start <= offset && offset < chunk.stop && offset < stop { - isFullChunk := 
chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop - views = append(views, &ChunkView{ - FileId: chunk.fileId, - Offset: offset - chunk.start, // offset is the data starting location in this file id - Size: uint64(min(chunk.stop, stop) - offset), - LogicOffset: offset, - IsFullChunk: isFullChunk, - }) - offset = min(chunk.stop, stop) - } - } - - return views - -} - -func logPrintf(name string, visibles []VisibleInterval) { - /* - log.Printf("%s len %d", name, len(visibles)) - for _, v := range visibles { - log.Printf("%s: => %+v", name, v) - } - */ -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(VisibleInterval) - }, -} - -func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval { - - newV := newVisibleInterval( - chunk.Offset, - chunk.Offset+int64(chunk.Size), - chunk.GetFileIdString(), - chunk.Mtime, - true, - ) - - length := len(visibles) - if length == 0 { - return append(visibles, newV) - } - last := visibles[length-1] - if last.stop <= chunk.Offset { - return append(visibles, newV) - } - - logPrintf(" before", visibles) - for _, v := range visibles { - if v.start < chunk.Offset && chunk.Offset < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - v.start, - chunk.Offset, - v.fileId, - v.modifiedTime, - false, - )) - } - chunkStop := chunk.Offset + int64(chunk.Size) - if v.start < chunkStop && chunkStop < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false, - )) - } - if chunkStop <= v.start || v.stop <= chunk.Offset { - newVisibles = append(newVisibles, v) - } - } - newVisibles = append(newVisibles, newV) - - logPrintf(" append", newVisibles) - - for i := len(newVisibles) - 1; i >= 0; i-- { - if i > 0 && newV.start < newVisibles[i-1].start { - newVisibles[i] = newVisibles[i-1] - } else { - newVisibles[i] = newV - break - } - } - logPrintf(" sorted", newVisibles) - - return newVisibles -} - -func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) { - - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].Mtime < chunks[j].Mtime - }) - - var newVisibles []VisibleInterval - for _, chunk := range chunks { - newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk) - t := visibles[:0] - visibles = newVisibles - newVisibles = t - - logPrintf("add", visibles) - - } - - return -} - -// find non-overlapping visible intervals -// visible interval map to one file chunk - -type VisibleInterval struct { - start int64 - stop int64 - modifiedTime int64 - fileId string - isFullChunk bool -} - -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval { - return VisibleInterval{ - start: start, - stop: stop, - fileId: fileId, - modifiedTime: modifiedTime, - isFullChunk: isFullChunk, - } -} - -func min(x, y int64) int64 { - if x <= y { - return x - } - return y -} diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go deleted file mode 100644 index b724e20fd..000000000 --- a/weed/filer2/filer.go +++ /dev/null @@ -1,253 +0,0 @@ -package filer2 - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "google.golang.org/grpc" - - "github.com/karlseguin/ccache" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/wdclient" -) - -const PaginationSize = 1024 * 256 - -var ( - OS_UID = uint32(os.Getuid()) - OS_GID = uint32(os.Getgid()) -) - -type Filer struct { - store 
*FilerStoreWrapper - directoryCache *ccache.Cache - MasterClient *wdclient.MasterClient - fileIdDeletionChan chan string - GrpcDialOption grpc.DialOption -} - -func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { - f := &Filer{ - directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), - fileIdDeletionChan: make(chan string, PaginationSize), - GrpcDialOption: grpcDialOption, - } - - go f.loopProcessingDeletion() - - return f -} - -func (f *Filer) SetStore(store FilerStore) { - f.store = NewFilerStoreWrapper(store) -} - -func (f *Filer) DisableDirectoryCache() { - f.directoryCache = nil -} - -func (fs *Filer) GetMaster() string { - return fs.MasterClient.GetMaster() -} - -func (fs *Filer) KeepConnectedToMaster() { - fs.MasterClient.KeepConnectedToMaster() -} - -func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { - return f.store.BeginTransaction(ctx) -} - -func (f *Filer) CommitTransaction(ctx context.Context) error { - return f.store.CommitTransaction(ctx) -} - -func (f *Filer) RollbackTransaction(ctx context.Context) error { - return f.store.RollbackTransaction(ctx) -} - -func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { - - if string(entry.FullPath) == "/" { - return nil - } - - dirParts := strings.Split(string(entry.FullPath), "/") - - // fmt.Printf("directory parts: %+v\n", dirParts) - - var lastDirectoryEntry *Entry - - for i := 1; i < len(dirParts); i++ { - dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...)) - // fmt.Printf("%d directory: %+v\n", i, dirPath) - - // first check local cache - dirEntry := f.cacheGetDirectory(dirPath) - - // not found, check the store directly - if dirEntry == nil { - glog.V(4).Infof("find uncached directory: %s", dirPath) - dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) - } else { - glog.V(4).Infof("found cached directory: %s", dirPath) - } - - // no such existing directory - if dirEntry == nil { - - // create the directory - now := time.Now() - - dirEntry = &Entry{ - FullPath: FullPath(dirPath), - Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0770, - Uid: entry.Uid, - Gid: entry.Gid, - }, - } - - glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) - mkdirErr := f.store.InsertEntry(ctx, dirEntry) - if mkdirErr != nil { - if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { - return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) - } - } else { - f.NotifyUpdateEvent(nil, dirEntry, false) - } - - } else if !dirEntry.IsDirectory() { - return fmt.Errorf("%s is a file", dirPath) - } - - // cache the directory entry - f.cacheSetDirectory(dirPath, dirEntry, i) - - // remember the direct parent directory entry - if i == len(dirParts)-1 { - lastDirectoryEntry = dirEntry - } - - } - - if lastDirectoryEntry == nil { - return fmt.Errorf("parent folder not found: %v", entry.FullPath) - } - - /* - if !hasWritePermission(lastDirectoryEntry, entry) { - glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d", - lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid) - return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath) - } - */ - - oldEntry, _ := f.FindEntry(ctx, entry.FullPath) - - if oldEntry == nil { - if err := f.store.InsertEntry(ctx, entry); err != nil { - glog.Errorf("insert entry %s: %v", entry.FullPath, err) - return fmt.Errorf("insert entry %s: %v", 
entry.FullPath, err) - } - } else { - if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { - glog.Errorf("update entry %s: %v", entry.FullPath, err) - return fmt.Errorf("update entry %s: %v", entry.FullPath, err) - } - } - - f.NotifyUpdateEvent(oldEntry, entry, true) - - f.deleteChunksIfNotNew(oldEntry, entry) - - return nil -} - -func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { - if oldEntry != nil { - if oldEntry.IsDirectory() && !entry.IsDirectory() { - glog.Errorf("existing %s is a directory", entry.FullPath) - return fmt.Errorf("existing %s is a directory", entry.FullPath) - } - if !oldEntry.IsDirectory() && entry.IsDirectory() { - glog.Errorf("existing %s is a file", entry.FullPath) - return fmt.Errorf("existing %s is a file", entry.FullPath) - } - } - return f.store.UpdateEntry(ctx, entry) -} - -func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) { - - now := time.Now() - - if string(p) == "/" { - return &Entry{ - FullPath: p, - Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0755, - Uid: OS_UID, - Gid: OS_GID, - }, - }, nil - } - return f.store.FindEntry(ctx, p) -} - -func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { - if strings.HasSuffix(string(p), "/") && len(p) > 1 { - p = p[0 : len(p)-1] - } - return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) -} - -func (f *Filer) cacheDelDirectory(dirpath string) { - - if dirpath == "/" { - return - } - - if f.directoryCache == nil { - return - } - f.directoryCache.Delete(dirpath) - return -} - -func (f *Filer) cacheGetDirectory(dirpath string) *Entry { - - if f.directoryCache == nil { - return nil - } - item := f.directoryCache.Get(dirpath) - if item == nil { - return nil - } - return item.Value().(*Entry) -} - -func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) { - - if f.directoryCache == nil { - return - } - - minutes := 60 - if level < 10 { - minutes -= level * 6 - } - - f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute) -} diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go deleted file mode 100644 index 1a10f7c20..000000000 --- a/weed/filer2/filer_client_util.go +++ /dev/null @@ -1,172 +0,0 @@ -package filer2 - -import ( - "context" - "fmt" - "io" - "math" - "strings" - "sync" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" -) - -func VolumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId -} - -type FilerClient interface { - WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error -} - -func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { - var vids []string - for _, chunkView := range chunkViews { - vids = append(vids, VolumeId(chunkView.FileId)) - } - - vid2Locations := make(map[string]*filer_pb.Locations) - - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return err - } - - 
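// save the lookup result; the per-chunk goroutines below use it to resolve volume locations -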
vid2Locations = resp.LocationsMap - - return nil - }) - - if err != nil { - return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) - } - - var wg sync.WaitGroup - for _, chunkView := range chunkViews { - wg.Add(1) - go func(chunkView *ChunkView) { - defer wg.Done() - - glog.V(4).Infof("read fh reading chunk: %+v", chunkView) - - locations := vid2Locations[VolumeId(chunkView.FileId)] - if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", chunkView.FileId) - err = fmt.Errorf("failed to locate %s", chunkView.FileId) - return - } - - var n int64 - n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), - chunkView.Offset, - int(chunkView.Size), - buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)], - !chunkView.IsFullChunk) - - if err != nil { - - glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err) - - err = fmt.Errorf("failed to read http://%s/%s: %v", - locations.Locations[0].Url, chunkView.FileId, err) - return - } - - glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) - totalRead += n - - }(chunkView) - } - wg.Wait() - return -} - -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) { - - dir, name := FullPath(fullFilePath).DirAndName() - - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir, - Name: name, - } - - glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { - return nil - } - glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err) - return err - } - - if resp.Entry == nil { - glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) - return nil - } - - entry = resp.Entry - return nil - }) - - return -} - -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { - - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - lastEntryName := "" - - request := &filer_pb.ListEntriesRequest{ - Directory: fullDirPath, - Prefix: prefix, - StartFromFileName: lastEntryName, - Limit: math.MaxUint32, - } - - glog.V(3).Infof("read directory: %v", request) - stream, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("list %s: %v", fullDirPath, err) - } - - var prevEntry *filer_pb.Entry - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - if prevEntry != nil { - fn(prevEntry, true) - } - break - } else { - return recvErr - } - } - if prevEntry != nil { - fn(prevEntry, false) - } - prevEntry = resp.Entry - } - - return nil - - }) - - return -} diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go deleted file mode 100644 index 75a09e7ef..000000000 --- a/weed/filer2/filer_delete_entry.go +++ /dev/null @@ -1,102 +0,0 @@ -package filer2 - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { 
- if p == "/" { - return nil - } - - entry, findErr := f.FindEntry(ctx, p) - if findErr != nil { - return findErr - } - - var chunks []*filer_pb.FileChunk - chunks = append(chunks, entry.Chunks...) - if entry.IsDirectory() { - // delete the folder children, not including the folder itself - var dirChunks []*filer_pb.FileChunk - dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks) - if err != nil { - return fmt.Errorf("delete directory %s: %v", p, err) - } - chunks = append(chunks, dirChunks...) - f.cacheDelDirectory(string(p)) - } - // delete the file or folder - err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks) - if err != nil { - return fmt.Errorf("delete file %s: %v", p, err) - } - - if shouldDeleteChunks { - go f.DeleteChunks(chunks) - } - - return nil -} - -func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) { - - lastFileName := "" - includeLastFile := false - for { - entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) - if err != nil { - glog.Errorf("list folder %s: %v", entry.FullPath, err) - return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) - } - if lastFileName == "" && !isRecursive && len(entries) > 0 { - // only for first iteration in the loop - return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) - } - - for _, sub := range entries { - lastFileName = sub.Name() - var dirChunks []*filer_pb.FileChunk - if sub.IsDirectory() { - dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks) - } - if err != nil && !ignoreRecursiveError { - return nil, err - } - if shouldDeleteChunks { - chunks = append(chunks, dirChunks...) 
- } - } - - if len(entries) < PaginationSize { - break - } - } - - f.cacheDelDirectory(string(entry.FullPath)) - - glog.V(3).Infof("deleting directory %v", entry.FullPath) - - if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { - return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) - } - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - - return chunks, nil -} - -func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) { - - glog.V(3).Infof("deleting entry %v", entry.FullPath) - - if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil { - return fmt.Errorf("filer store delete: %v", storeDeletionErr) - } - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - - return nil -} diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go deleted file mode 100644 index 9937685f7..000000000 --- a/weed/filer2/filer_deletion.go +++ /dev/null @@ -1,87 +0,0 @@ -package filer2 - -import ( - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func (f *Filer) loopProcessingDeletion() { - - ticker := time.NewTicker(5 * time.Second) - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { - m := make(map[string]operation.LookupResult) - for _, vid := range vids { - locs, _ := f.MasterClient.GetVidLocations(vid) - var locations []operation.Location - for _, loc := range locs { - locations = append(locations, operation.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - }) - } - m[vid] = operation.LookupResult{ - VolumeId: vid, - Locations: locations, - } - } - return m, nil - } - - var fileIds []string - for { - select { - case fid := <-f.fileIdDeletionChan: - fileIds = append(fileIds, fid) - if len(fileIds) >= 4096 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] - } - } - } -} - -func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { - for _, chunk := range chunks { - f.fileIdDeletionChan <- chunk.GetFileIdString() - } -} - -// DeleteFileByFileId direct delete by file id. -// Only used when the fileId is not being managed by snapshots. 
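-// Deletion is asynchronous: the id is queued on fileIdDeletionChan and flushed in batches.
-// Hypothetical usage, with a made-up file id: f.DeleteFileByFileId("3,01637037d6")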
-func (f *Filer) DeleteFileByFileId(fileId string) { - f.fileIdDeletionChan <- fileId -} - -func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { - - if oldEntry == nil { - return - } - if newEntry == nil { - f.DeleteChunks(oldEntry.Chunks) - } - - var toDelete []*filer_pb.FileChunk - newChunkIds := make(map[string]bool) - for _, newChunk := range newEntry.Chunks { - newChunkIds[newChunk.GetFileIdString()] = true - } - - for _, oldChunk := range oldEntry.Chunks { - if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found { - toDelete = append(toDelete, oldChunk) - } - } - f.DeleteChunks(toDelete) -} diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go deleted file mode 100644 index c37381116..000000000 --- a/weed/filer2/filer_notify.go +++ /dev/null @@ -1,39 +0,0 @@ -package filer2 - -import ( - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) { - var key string - if oldEntry != nil { - key = string(oldEntry.FullPath) - } else if newEntry != nil { - key = string(newEntry.FullPath) - } else { - return - } - - if notification.Queue != nil { - - glog.V(3).Infof("notifying entry update %v", key) - - newParentPath := "" - if newEntry != nil { - newParentPath, _ = newEntry.FullPath.DirAndName() - } - - notification.Queue.SendMessage( - key, - &filer_pb.EventNotification{ - OldEntry: oldEntry.ToProtoEntry(), - NewEntry: newEntry.ToProtoEntry(), - DeleteChunks: deleteChunks, - NewParentPath: newParentPath, - }, - ) - - } -} diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go deleted file mode 100644 index 0bb0bd611..000000000 --- a/weed/filer2/filerstore.go +++ /dev/null @@ -1,138 +0,0 @@ -package filer2 - -import ( - "context" - "errors" - "time" - - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type FilerStore interface { - // GetName gets the name to locate the configuration in filer.toml file - GetName() string - // Initialize initializes the file store - Initialize(configuration util.Configuration) error - InsertEntry(context.Context, *Entry) error - UpdateEntry(context.Context, *Entry) (err error) - // err == filer2.ErrNotFound if not found - FindEntry(context.Context, FullPath) (entry *Entry, err error) - DeleteEntry(context.Context, FullPath) (err error) - DeleteFolderChildren(context.Context, FullPath) (err error) - ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) - - BeginTransaction(ctx context.Context) (context.Context, error) - CommitTransaction(ctx context.Context) error - RollbackTransaction(ctx context.Context) error -} - -var ErrNotFound = errors.New("filer: no entry is found in filer store") - -type FilerStoreWrapper struct { - actualStore FilerStore -} - -func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { - if innerStore, ok := store.(*FilerStoreWrapper); ok { - return innerStore - } - return &FilerStoreWrapper{ - actualStore: store, - } -} - -func (fsw *FilerStoreWrapper) GetName() string { - return fsw.actualStore.GetName() -} - -func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error { - return fsw.actualStore.Initialize(configuration) -} - -func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error 
{ - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) - }() - - filer_pb.BeforeEntrySerialization(entry.Chunks) - return fsw.actualStore.InsertEntry(ctx, entry) -} - -func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) - }() - - filer_pb.BeforeEntrySerialization(entry.Chunks) - return fsw.actualStore.UpdateEntry(ctx, entry) -} - -func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) - }() - - entry, err = fsw.actualStore.FindEntry(ctx, fp) - if err != nil { - return nil, err - } - filer_pb.AfterEntryDeserialization(entry.Chunks) - return -} - -func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) - }() - - return fsw.actualStore.DeleteEntry(ctx, fp) -} - -func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) - }() - - return fsw.actualStore.DeleteFolderChildren(ctx, fp) -} - -func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) - }() - - entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit) - if err != nil { - return nil, err - } - for _, entry := range entries { - filer_pb.AfterEntryDeserialization(entry.Chunks) - } - return entries, err -} - -func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { - return fsw.actualStore.BeginTransaction(ctx) -} - -func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { - return fsw.actualStore.CommitTransaction(ctx) -} - -func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { - return fsw.actualStore.RollbackTransaction(ctx) -} diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go deleted file mode 100644 index d1b06ece5..000000000 --- a/weed/filer2/mysql/mysql_store.go +++ /dev/null @@ -1,74 +0,0 @@ -package mysql - -import ( - "database/sql" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - 
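// the generic SQL store that the mysql and postgres stores both embed -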
"github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql" - "github.com/chrislusf/seaweedfs/weed/util" - _ "github.com/go-sql-driver/mysql" -) - -const ( - CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" -) - -func init() { - filer2.Stores = append(filer2.Stores, &MysqlStore{}) -} - -type MysqlStore struct { - abstract_sql.AbstractSqlStore -} - -func (store *MysqlStore) GetName() string { - return "mysql" -} - -func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) { - return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), - configuration.GetBool("interpolateParams"), - ) -} - -func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int, - interpolateParams bool) (err error) { - - store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)" - store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?" - store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?" - store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?" - store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?" - store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?" - store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?" - - sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) - if interpolateParams { - sqlUrl += "&interpolateParams=true" - } - - var dbErr error - store.DB, dbErr = sql.Open("mysql", sqlUrl) - if dbErr != nil { - store.DB.Close() - store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err) - } - - store.DB.SetMaxIdleConns(maxIdle) - store.DB.SetMaxOpenConns(maxOpen) - - if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", sqlUrl, err) - } - - return nil -} diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go deleted file mode 100644 index 3ec000fe0..000000000 --- a/weed/filer2/postgres/postgres_store.go +++ /dev/null @@ -1,69 +0,0 @@ -package postgres - -import ( - "database/sql" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql" - "github.com/chrislusf/seaweedfs/weed/util" - _ "github.com/lib/pq" -) - -const ( - CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30" -) - -func init() { - filer2.Stores = append(filer2.Stores, &PostgresStore{}) -} - -type PostgresStore struct { - abstract_sql.AbstractSqlStore -} - -func (store *PostgresStore) GetName() string { - return "postgres" -} - -func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) { - return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetString("sslmode"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), - ) -} - -func (store 
*PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) { - - store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)" - store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4" - store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" - store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" - store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2" - store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" - store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" - - sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode) - var dbErr error - store.DB, dbErr = sql.Open("postgres", sqlUrl) - if dbErr != nil { - store.DB.Close() - store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err) - } - - store.DB.SetMaxIdleConns(maxIdle) - store.DB.SetMaxOpenConns(maxOpen) - - if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", sqlUrl, err) - } - - return nil -} diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go deleted file mode 100644 index 01b87cad1..000000000 --- a/weed/filer2/stream.go +++ /dev/null @@ -1,41 +0,0 @@ -package filer2 - -import ( - "io" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/chrislusf/seaweedfs/weed/wdclient" -) - -func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error { - - chunkViews := ViewFromChunks(chunks, offset, size) - - fileId2Url := make(map[string]string) - - for _, chunkView := range chunkViews { - - urlString, err := masterClient.LookupFileId(chunkView.FileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) - return err - } - fileId2Url[chunkView.FileId] = urlString - } - - for _, chunkView := range chunkViews { - urlString := fileId2Url[chunkView.FileId] - _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { - w.Write(data) - }) - if err != nil { - glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) - return err - } - } - - return nil - -} diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go deleted file mode 100644 index 4eb8cb90d..000000000 --- a/weed/filer2/tikv/tikv_store.go +++ /dev/null @@ -1,251 +0,0 @@ -// +build !386 -// +build !arm - -package tikv - -import ( - "bytes" - "context" - "crypto/md5" - "fmt" - "io" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - weed_util "github.com/chrislusf/seaweedfs/weed/util" - - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/store/tikv" -) - -func init() { - filer2.Stores = append(filer2.Stores, &TikvStore{}) -} - -type TikvStore struct { - store kv.Storage -} - -func (store *TikvStore) GetName() string { - return "tikv" -} - -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - pdAddr := configuration.GetString("pdAddress") - return store.initialize(pdAddr) -} - -func (store *TikvStore) initialize(pdAddr 
string) (err error) { - glog.Infof("filer store tikv pd address: %s", pdAddr) - - driver := tikv.Driver{} - - store.store, err = driver.Open(fmt.Sprintf("tikv://%s", pdAddr)) - - if err != nil { - return fmt.Errorf("open tikv %s : %v", pdAddr, err) - } - - return -} - -func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { - tx, err := store.store.Begin() - if err != nil { - return ctx, err - } - return context.WithValue(ctx, "tx", tx), nil -} -func (store *TikvStore) CommitTransaction(ctx context.Context) error { - tx, ok := ctx.Value("tx").(kv.Transaction) - if ok { - return tx.Commit(ctx) - } - return nil -} -func (store *TikvStore) RollbackTransaction(ctx context.Context) error { - tx, ok := ctx.Value("tx").(kv.Transaction) - if ok { - return tx.Rollback() - } - return nil -} - -func (store *TikvStore) getTx(ctx context.Context) kv.Transaction { - if tx, ok := ctx.Value("tx").(kv.Transaction); ok { - return tx - } - return nil -} - -func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - dir, name := entry.DirAndName() - key := genKey(dir, name) - - value, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) - } - - err = store.getTx(ctx).Set(key, value) - - if err != nil { - return fmt.Errorf("persisting %s : %v", entry.FullPath, err) - } - - // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) - - return nil -} - -func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - return store.InsertEntry(ctx, entry) -} - -func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - dir, name := fullpath.DirAndName() - key := genKey(dir, name) - - data, err := store.getTx(ctx).Get(ctx, key) - - if err == kv.ErrNotExist { - return nil, filer2.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) - } - - entry = &filer2.Entry{ - FullPath: fullpath, - } - err = entry.DecodeAttributesAndChunks(data) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) - - return entry, nil -} - -func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - dir, name := fullpath.DirAndName() - key := genKey(dir, name) - - err = store.getTx(ctx).Delete(key) - if err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { - - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") - - tx := store.getTx(ctx) - - iter, err := tx.Iter(directoryPrefix, nil) - if err != nil { - return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err) - } - defer iter.Close() - for iter.Valid() { - key := iter.Key() - if !bytes.HasPrefix(key, directoryPrefix) { - break - } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - - if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - iter.Next() - } - - return nil -} - -func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - - directoryPrefix := 
genDirectoryKeyPrefix(fullpath, "") - lastFileStart := genDirectoryKeyPrefix(fullpath, startFileName) - - iter, err := store.getTx(ctx).Iter(lastFileStart, nil) - if err != nil { - return nil, fmt.Errorf("list %s: %v", fullpath, err) - } - defer iter.Close() - for iter.Valid() { - key := iter.Key() - if !bytes.HasPrefix(key, directoryPrefix) { - break - } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - if fileName == startFileName && !inclusive { - iter.Next() - continue - } - limit-- - if limit < 0 { - break - } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), - } - - // println("list", entry.FullPath, "chunks", len(entry.Chunks)) - - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { - err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) - break - } - entries = append(entries, entry) - iter.Next() - } - - return entries, err -} - -func genKey(dirPath, fileName string) (key []byte) { - key = hashToBytes(dirPath) - key = append(key, []byte(fileName)...) - return key -} - -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { - keyPrefix = hashToBytes(string(fullpath)) - if len(startFileName) > 0 { - keyPrefix = append(keyPrefix, []byte(startFileName)...) - } - return keyPrefix -} - -func getNameFromKey(key []byte) string { - - return string(key[md5.Size:]) - -} - -// hash directory -func hashToBytes(dir string) []byte { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - return b -} diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go deleted file mode 100644 index 36de2d974..000000000 --- a/weed/filer2/tikv/tikv_store_unsupported.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build 386 arm - -package tikv - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - weed_util "github.com/chrislusf/seaweedfs/weed/util" -) - -func init() { - filer2.Stores = append(filer2.Stores, &TikvStore{}) -} - -type TikvStore struct { -} - -func (store *TikvStore) GetName() string { - return "tikv" -} - -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) initialize(pdAddr string) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} -func (store *TikvStore) CommitTransaction(ctx context.Context) error { - return fmt.Errorf("not implemented for 32 bit computers") -} -func (store *TikvStore) RollbackTransaction(ctx context.Context) error { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func 
(store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7b24a1ec5..6ee20974b 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -1,27 +1,39 @@ package filesys import ( + "bytes" "context" + "math" "os" - "path" + "strings" + "syscall" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type Dir struct { - Path string - wfs *WFS - entry *filer_pb.Entry + name string + wfs *WFS + entry *filer_pb.Entry + parent *Dir + id uint64 } var _ = fs.Node(&Dir{}) + +//var _ = fs.NodeIdentifier(&Dir{}) var _ = fs.NodeCreater(&Dir{}) +var _ = fs.NodeMknoder(&Dir{}) var _ = fs.NodeMkdirer(&Dir{}) +var _ = fs.NodeFsyncer(&Dir{}) var _ = fs.NodeRequestLookuper(&Dir{}) var _ = fs.HandleReadDirAller(&Dir{}) var _ = fs.NodeRemover(&Dir{}) @@ -31,44 +43,57 @@ var _ = fs.NodeGetxattrer(&Dir{}) var _ = fs.NodeSetxattrer(&Dir{}) var _ = fs.NodeRemovexattrer(&Dir{}) var _ = fs.NodeListxattrer(&Dir{}) +var _ = fs.NodeForgetter(&Dir{}) -func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { +func (dir *Dir) xId() uint64 { + return dir.id +} - glog.V(3).Infof("dir Attr %s", dir.Path) +func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second - if dir.Path == dir.wfs.option.FilerMountRootPath { + if dir.FullPath() == dir.wfs.option.FilerMountRootPath { dir.setRootDirAttributes(attr) + glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr) return nil } - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { + glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err) return err } - attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir - attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) - attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0) - attr.Gid = dir.entry.Attributes.Gid - attr.Uid = dir.entry.Attributes.Uid + // attr.Inode = dir.Id() + attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir + attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) + attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) + attr.Gid = entry.Attributes.Gid + attr.Uid = entry.Attributes.Uid + + glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr) return nil } func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - glog.V(4).Infof("dir Getxattr %s", dir.Path) + glog.V(4).Infof("dir Getxattr %s", dir.FullPath()) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - return getxattr(dir.entry, req, resp) + return getxattr(entry, req, resp) } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + // 
attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() + attr.Valid = time.Second + attr.Inode = 1 // dir.Id() attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid attr.Mode = dir.wfs.option.MountMode @@ -76,84 +101,178 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { attr.Ctime = dir.wfs.option.MountCtime attr.Mtime = dir.wfs.option.MountMtime attr.Atime = dir.wfs.option.MountMtime + attr.BlockSize = blockSize +} + +func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { + // fsync works at OS level + // write the file chunks to the filerGrpcAddress + glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req) + + return nil } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { +func (dir *Dir) newFile(name string) fs.Node { + + fileFullPath := util.NewFullPath(dir.FullPath(), name) + fileId := fileFullPath.AsInode() + dir.wfs.handlesLock.Lock() + existingHandle, found := dir.wfs.handles[fileId] + dir.wfs.handlesLock.Unlock() + + if found { + glog.V(4).Infof("newFile found opened file handle: %+v", fileFullPath) + return existingHandle.f + } return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, + Name: name, + dir: dir, + wfs: dir.wfs, + id: fileId, } } +func (dir *Dir) newDirectory(fullpath util.FullPath) fs.Node { + + return &Dir{name: fullpath.Name(), wfs: dir.wfs, parent: dir, id: fullpath.AsInode()} + +} + func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { + exclusive := req.Flags&fuse.OpenExclusive != 0 + isDirectory := req.Mode&os.ModeDir > 0 + + if exclusive || isDirectory { + _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, exclusive) + if err != nil { + return nil, nil, err + } + } + var node fs.Node + if isDirectory { + node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name)) + return node, nil, nil + } + + node = dir.newFile(req.Name) + file := node.(*File) + file.entry = &filer_pb.Entry{ + Name: req.Name, + IsDirectory: req.Mode&os.ModeDir > 0, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + Collection: dir.wfs.option.Collection, + Replication: dir.wfs.option.Replication, + TtlSec: dir.wfs.option.TtlSec, + }, + } + file.dirtyMetadata = true + fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) + return file, fh, nil + +} + +func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) { + + _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false) + + if err != nil { + return nil, err + } + var node fs.Node + node = dir.newFile(req.Name) + return node, nil +} + +func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) { + dirFullPath := dir.FullPath() request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, + Directory: dirFullPath, Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: req.Mode&os.ModeDir > 0, + Name: name, + IsDirectory: mode&os.ModeDir > 0, Attributes: &filer_pb.FuseAttributes{ Mtime: time.Now().Unix(), Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, + FileMode: uint32(mode &^ dir.wfs.option.Umask), + Uid: uid, + Gid: gid, Collection: dir.wfs.option.Collection, Replication: dir.wfs.option.Replication, TtlSec: dir.wfs.option.TtlSec, }, }, + 
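// NOTE: OExcl forwards the caller's O_EXCL intent to the filer so an exclusive
// create of an existing entry fails remotely; doCreateEntry maps the returned
// "EEXIST" error string back to fuse.EEXIST. Signatures tags each mutation
// with this mount's signature, presumably so self-originated changes can be
// told apart when replaying the filer's metadata events.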
OExcl: exclusive, + Signatures: []int32{dir.wfs.signature}, } - glog.V(1).Infof("create: %v", request) + glog.V(1).Infof("create %s/%s", dirFullPath, name) - if request.Entry.IsDirectory { - if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.CreateEntry(client, request); err != nil { + if strings.Contains(err.Error(), "EEXIST") { + return fuse.EEXIST } - return nil - }); err != nil { - return nil, nil, err + glog.V(0).Infof("create %s/%s: %v", dirFullPath, name, err) + return fuse.EIO } - } - file := dir.newFile(req.Name, request.Entry) - if !request.Entry.IsDirectory { - file.isOpen = true - } - fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) - fh.dirtyMetadata = true - return file, fh, nil + if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("local InsertEntry dir %s/%s: %v", dirFullPath, name, err) + return fuse.EIO + } + return nil + }) + return request, err } func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name) + + newEntry := &filer_pb.Entry{ + Name: req.Name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + }, + } + + dirFullPath := dir.FullPath() + + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(newEntry) + defer dir.wfs.mapPbIdFromFilerToLocal(newEntry) request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, - Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, - }, - }, + Directory: dirFullPath, + Entry: newEntry, + Signatures: []int32{dir.wfs.signature}, } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err) + return err + } + + if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("local mkdir dir %s/%s: %v", dirFullPath, req.Name, err) return fuse.EIO } @@ -161,221 +280,258 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }) if err == nil { - node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs} + node := dir.newDirectory(util.NewFullPath(dirFullPath, req.Name)) + return node, nil } - return nil, err + glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err) + + return nil, fuse.EIO } func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { - glog.V(4).Infof("dir 
Lookup %s: %s", dir.Path, req.Name) + dirPath := util.FullPath(dir.FullPath()) + glog.V(4).Infof("dir Lookup %s: %s by %s", dirPath, req.Name, req.Header.String()) - var entry *filer_pb.Entry - fullFilePath := path.Join(dir.Path, req.Name) - - item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) + fullFilePath := dirPath.Child(req.Name) + visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath) + if visitErr != nil { + glog.Errorf("dir Lookup %s: %v", dirPath, visitErr) + return nil, fuse.EIO + } + localEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath) + if cacheErr == filer_pb.ErrNotFound { + return nil, fuse.ENOENT } - if entry == nil { - glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) - entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) + if localEntry == nil { + // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) + entry, err := filer_pb.GetEntry(dir.wfs, fullFilePath) if err != nil { - return nil, err - } - if entry != nil { - dir.wfs.listDirectoryEntriesCache.Set(fullFilePath, entry, 5*time.Minute) + glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + return nil, fuse.ENOENT } + localEntry = filer.FromPbEntry(string(dirPath), entry) } else { glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } - if entry != nil { - if entry.IsDirectory { - node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, entry: entry} + if localEntry != nil { + if localEntry.IsDirectory() { + node = dir.newDirectory(fullFilePath) } else { - node = dir.newFile(req.Name, entry) + node = dir.newFile(req.Name) } - resp.EntryValid = time.Duration(0) - resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) - resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) - resp.Attr.Gid = entry.Attributes.Gid - resp.Attr.Uid = entry.Attributes.Uid + // resp.EntryValid = time.Second + resp.Attr.Inode = fullFilePath.AsInode() + resp.Attr.Valid = time.Second + resp.Attr.Mtime = localEntry.Attr.Mtime + resp.Attr.Crtime = localEntry.Attr.Crtime + resp.Attr.Mode = localEntry.Attr.Mode + resp.Attr.Gid = localEntry.Attr.Gid + resp.Attr.Uid = localEntry.Attr.Uid + if localEntry.HardLinkCounter > 0 { + resp.Attr.Nlink = uint32(localEntry.HardLinkCounter) + } return node, nil } + glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - glog.V(3).Infof("dir ReadDirAll %s", dir.Path) + dirPath := util.FullPath(dir.FullPath()) + glog.V(4).Infof("dir ReadDirAll %s", dirPath) - cacheTtl := 5 * time.Minute - - readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, dir.Path, "", func(entry *filer_pb.Entry, isLast bool) { - if entry.IsDirectory { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir} + processEachEntryFn := func(entry *filer.Entry, isLast bool) { + if entry.IsDirectory() { + dirent := fuse.Dirent{Name: entry.Name(), Type: fuse.DT_Dir, Inode: dirPath.Child(entry.Name()).AsInode()} ret = append(ret, dirent) } else { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File} + dirent := fuse.Dirent{Name: entry.Name(), Type: findFileType(uint16(entry.Attr.Mode)), Inode: dirPath.Child(entry.Name()).AsInode()} ret = append(ret, dirent) } - dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl) + } + + if err = 
meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil { + glog.Errorf("dir ReadDirAll %s: %v", dirPath, err) + return nil, fuse.EIO + } + listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool { + processEachEntryFn(entry, false) + return true }) - if readErr != nil { - glog.V(0).Infof("list %s: %v", dir.Path, err) - return ret, fuse.EIO + if listErr != nil { + glog.Errorf("list meta cache: %v", listErr) + return nil, fuse.EIO } + return +} - return ret, err +func findFileType(mode uint16) fuse.DirentType { + switch mode & (syscall.S_IFMT & 0xffff) { + case syscall.S_IFSOCK: + return fuse.DT_Socket + case syscall.S_IFLNK: + return fuse.DT_Link + case syscall.S_IFREG: + return fuse.DT_File + case syscall.S_IFBLK: + return fuse.DT_Block + case syscall.S_IFDIR: + return fuse.DT_Dir + case syscall.S_IFCHR: + return fuse.DT_Char + case syscall.S_IFIFO: + return fuse.DT_FIFO + } + return fuse.DT_File } func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { if !req.Dir { - return dir.removeOneFile(ctx, req) + return dir.removeOneFile(req) } - return dir.removeFolder(ctx, req) + return dir.removeFolder(req) } -func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { - entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name)) + dirFullPath := dir.FullPath() + filePath := util.NewFullPath(dirFullPath, req.Name) + entry, err := filer_pb.GetEntry(dir.wfs, filePath) if err != nil { return err } - dir.wfs.deleteFileChunks(ctx, entry.Chunks) - - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) - - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + // first, ensure the filer store can correctly delete + glog.V(3).Infof("remove file: %v", req) + isDeleteData := entry != nil && entry.HardLinkCounter <= 1 + err = filer_pb.Remove(dir.wfs, dirFullPath, req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature}) + if err != nil { + glog.V(3).Infof("not found remove file %s: %v", filePath, err) + return fuse.ENOENT + } - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: false, - } + // then, delete meta cache and fsNode cache + if err = dir.wfs.metaCache.DeleteEntry(context.Background(), filePath); err != nil { + glog.V(3).Infof("local DeleteEntry %s: %v", filePath, err) + return fuse.ESTALE + } - glog.V(3).Infof("remove file: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + // remove current file handle if any + dir.wfs.handlesLock.Lock() + defer dir.wfs.handlesLock.Unlock() + inodeId := filePath.AsInode() + delete(dir.wfs.handles, inodeId) - return nil - }) + return nil } -func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) +func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: true, + dirFullPath := dir.FullPath() + glog.V(3).Infof("remove directory entry: %v", req) + ignoreRecursiveErr := true // ignore recursion error since the OS 
should manage it + err := filer_pb.Remove(dir.wfs, dirFullPath, req.Name, true, true, ignoreRecursiveErr, false, []int32{dir.wfs.signature}) + if err != nil { + glog.V(0).Infof("remove %s/%s: %v", dirFullPath, req.Name, err) + if strings.Contains(err.Error(), "non-empty") { + return fuse.EEXIST } + return fuse.ENOENT + } - glog.V(3).Infof("remove directory entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + t := util.NewFullPath(dirFullPath, req.Name) + dir.wfs.metaCache.DeleteEntry(context.Background(), t) - return nil - }) + return nil } func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := dir.maybeLoadEntry(ctx); err != nil { + glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req) + + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { - dir.entry.Attributes.FileMode = uint32(req.Mode) + entry.Attributes.FileMode = uint32(req.Mode) } if req.Valid.Uid() { - dir.entry.Attributes.Uid = req.Uid + entry.Attributes.Uid = req.Uid } if req.Valid.Gid() { - dir.entry.Attributes.Gid = req.Gid + entry.Attributes.Gid = req.Gid } if req.Valid.Mtime() { - dir.entry.Attributes.Mtime = req.Mtime.Unix() + entry.Attributes.Mtime = req.Mtime.Unix() } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - - return dir.saveEntry(ctx) + return dir.saveEntry(entry) } func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { - glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name) + glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := setxattr(dir.entry, req); err != nil { + if err := setxattr(entry, req); err != nil { return err } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - - return dir.saveEntry(ctx) + return dir.saveEntry(entry) } func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { - glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name) + glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := removexattr(dir.entry, req); err != nil { + if err := removexattr(entry, req); err != nil { return err } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - - return dir.saveEntry(ctx) + return dir.saveEntry(entry) } func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - glog.V(4).Infof("dir Listxattr %s", dir.Path) + glog.V(4).Infof("dir Listxattr %s", dir.FullPath()) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := listxattr(dir.entry, req, resp); err != nil { + if err := listxattr(entry, req, resp); err != nil { return err } @@ -383,39 +539,66 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp } -func (dir *Dir) maybeLoadEntry(ctx context.Context) error { - if dir.entry == nil { - parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() - entry, err := dir.wfs.maybeLoadEntry(ctx, parentDirPath, name) - if err != nil { - return err - } - if entry == nil { - return fuse.ENOENT - } - 
dir.entry = entry - } - return nil +func (dir *Dir) Forget() { + glog.V(4).Infof("Forget dir %s", dir.FullPath()) } -func (dir *Dir) saveEntry(ctx context.Context) error { +func (dir *Dir) maybeLoadEntry() (*filer_pb.Entry, error) { + parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName() + return dir.wfs.maybeLoadEntry(parentDirPath, name) +} + +func (dir *Dir) saveEntry(entry *filer_pb.Entry) error { - parentDir, name := filer2.FullPath(dir.Path).DirAndName() + parentDir, name := util.FullPath(dir.FullPath()).DirAndName() - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(entry) + defer dir.wfs.mapPbIdFromFilerToLocal(entry) request := &filer_pb.UpdateEntryRequest{ - Directory: parentDir, - Entry: dir.entry, + Directory: parentDir, + Entry: entry, + Signatures: []int32{dir.wfs.signature}, } glog.V(1).Infof("save dir entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err) + glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err) return fuse.EIO } + if err := dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err) + return fuse.ESTALE + } + return nil }) } + +func (dir *Dir) FullPath() string { + var parts []string + for p := dir; p != nil; p = p.parent { + if strings.HasPrefix(p.name, "/") { + if len(p.name) > 1 { + parts = append(parts, p.name[1:]) + } + } else { + parts = append(parts, p.name) + } + } + + if len(parts) == 0 { + return "/" + } + + var buf bytes.Buffer + for i := len(parts) - 1; i >= 0; i-- { + buf.WriteString("/") + buf.WriteString(parts[i]) + } + return buf.String() +} diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 8e60872d3..acdcd2de4 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -2,25 +2,110 @@ package filesys import ( "context" + "github.com/chrislusf/seaweedfs/weed/util" "os" "syscall" "time" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) +var _ = fs.NodeLinker(&Dir{}) var _ = fs.NodeSymlinker(&Dir{}) var _ = fs.NodeReadlinker(&File{}) +const ( + HARD_LINK_MARKER = '\x01' +) + +func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) { + + oldFile, ok := old.(*File) + if !ok { + glog.Errorf("old node is not a file: %+v", old) + } + + glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName) + + oldEntry, err := oldFile.maybeLoadEntry(ctx) + if err != nil { + return nil, err + } + + if oldEntry == nil { + return nil, fuse.EIO + } + + // update old file to hardlink mode + if len(oldEntry.HardLinkId) == 0 { + oldEntry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER) + oldEntry.HardLinkCounter = 1 + } + oldEntry.HardLinkCounter++ + updateOldEntryRequest := &filer_pb.UpdateEntryRequest{ + Directory: oldFile.dir.FullPath(), + Entry: oldEntry, + Signatures: []int32{dir.wfs.signature}, + } + + // CreateLink 1.2 : update new file to hardlink mode + request := &filer_pb.CreateEntryRequest{ + Directory: dir.FullPath(), + Entry: 
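// NOTE: hard links are modeled at the filer level: the first Link call rewrites
// the source entry with a random 16-byte HardLinkId (terminated by
// HARD_LINK_MARKER) and a HardLinkCounter, and the new name is created sharing
// the same chunks, HardLinkId and counter. removeOneFile in dir.go only deletes
// chunk data when HardLinkCounter <= 1, so unlinking one name keeps the data
// alive for the remaining links.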
&filer_pb.Entry{ + Name: req.NewName, + IsDirectory: false, + Attributes: oldEntry.Attributes, + Chunks: oldEntry.Chunks, + Extended: oldEntry.Extended, + HardLinkId: oldEntry.HardLinkId, + HardLinkCounter: oldEntry.HardLinkCounter, + }, + Signatures: []int32{dir.wfs.signature}, + } + + // apply changes to the filer, and also apply to local metaCache + err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil { + glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err) + return fuse.EIO + } + dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry)) + + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err) + return fuse.EIO + } + dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + + return nil + }) + + if err != nil { + return nil, fuse.EIO + } + + // create new file node + newNode := dir.newFile(req.NewName) + newFile := newNode.(*File) + + return newFile, err + +} + func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) { - glog.V(3).Infof("Symlink: %v/%v to %v", dir.Path, req.NewName, req.Target) + glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target) request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, + Directory: dir.FullPath(), Entry: &filer_pb.Entry{ Name: req.NewName, IsDirectory: false, @@ -33,17 +118,25 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, SymlinkTarget: req.Target, }, }, + Signatures: []int32{dir.wfs.signature}, } - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err) return fuse.EIO } + + dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + return nil }) - symlink := dir.newFile(req.NewName, request.Entry) + symlink := dir.newFile(req.NewName) return symlink, err @@ -51,16 +144,17 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return "", err } - if os.FileMode(file.entry.Attributes.FileMode)&os.ModeSymlink == 0 { + if os.FileMode(entry.Attributes.FileMode)&os.ModeSymlink == 0 { return "", fuse.Errno(syscall.EINVAL) } - glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.Path, file.Name, file.entry.Attributes.SymlinkTarget) + glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget) - return file.entry.Attributes.SymlinkTarget, nil + return 
entry.Attributes.SymlinkTarget, nil } diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index e72a15758..b07710d17 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,32 +2,90 @@ package filesys import ( "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := newDirectory.(*Dir) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + newPath := util.NewFullPath(newDir.FullPath(), req.NewName) + oldPath := util.NewFullPath(dir.FullPath(), req.OldName) + + glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath) + + // find local old entry + oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath) + if err != nil { + glog.Errorf("dir Rename can not find source %s : %v", oldPath, err) + return fuse.ENOENT + } + + // update remote filer + err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() request := &filer_pb.AtomicRenameEntryRequest{ - OldDirectory: dir.Path, + OldDirectory: dir.FullPath(), OldName: req.OldName, - NewDirectory: newDir.Path, + NewDirectory: newDir.FullPath(), NewName: req.NewName, } _, err := client.AtomicRenameEntry(ctx, request) if err != nil { - return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err) + return fuse.EXDEV } return nil }) + if err != nil { + glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err) + return fuse.EIO + } + + // TODO: replicate renaming logic on filer + if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil { + glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err) + return fuse.EIO + } + oldEntry.FullPath = newPath + if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil { + glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err) + return fuse.EIO + } + + oldFsNode := NodeWithId(oldPath.AsInode()) + newFsNode := NodeWithId(newPath.AsInode()) + dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) { + if file, ok := internalNode.(*File); ok { + glog.V(4).Infof("internal node %s", file.Name) + file.Name = req.NewName + file.id = uint64(newFsNode) + } + }) + + // change file handle + dir.wfs.handlesLock.Lock() + defer dir.wfs.handlesLock.Unlock() + inodeId := oldPath.AsInode() + existingHandle, found := dir.wfs.handles[inodeId] + glog.V(4).Infof("has open filehandle %s: %v", oldPath, found) + if !found || existingHandle == nil { + return nil + } + glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath) + delete(dir.wfs.handles, inodeId) + dir.wfs.handles[newPath.AsInode()] = existingHandle + return nil } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 35d8f249a..8888cff96 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -2,214 +2,117 @@ package filesys import ( "bytes" - "context" - "fmt" + "io" "sync" - "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" - 
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/security" ) type ContinuousDirtyPages struct { - hasData bool - Offset int64 - Size int64 - Data []byte - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + writeWaitGroup sync.WaitGroup + chunkAddLock sync.Mutex + lastErr error + collection string + replication string } func newDirtyPages(file *File) *ContinuousDirtyPages { - return &ContinuousDirtyPages{ - Data: nil, - f: file, + dirtyPages := &ContinuousDirtyPages{ + intervals: &ContinuousIntervals{}, + f: file, } + return dirtyPages } -func (pages *ContinuousDirtyPages) releaseResource() { - if pages.Data != nil { - pages.f.wfs.bufPool.Put(pages.Data) - pages.Data = nil - atomic.AddInt32(&counter, -1) - glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter) - } -} - -var counter = int32(0) +func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) { -func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { - - pages.lock.Lock() - defer pages.lock.Unlock() - - var chunk *filer_pb.FileChunk + glog.V(4).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. - return pages.flushAndSave(ctx, offset, data) + pages.flushAndSave(offset, data) } - if pages.Data == nil { - pages.Data = pages.f.wfs.bufPool.Get().([]byte) - atomic.AddInt32(&counter, 1) - glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter) - } + pages.intervals.AddInterval(data, offset) - if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || - pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { - // if the data is out of range, - // or buffer is full if adding new data, - // flush current buffer and add new data - - // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size) - - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } - pages.Offset = offset - copy(pages.Data, data) - pages.Size = int64(len(data)) - return + if pages.intervals.TotalSize() >= pages.f.wfs.option.ChunkSizeLimit { + pages.saveExistingLargestPageToStorage() } - if offset != pages.Offset+pages.Size { - // when this happens, debug shows the data overlapping with existing data is empty - // the data is not just append - if offset == pages.Offset && int(pages.Size) < len(data) { - // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size) - copy(pages.Data[pages.Size:], data[pages.Size:]) - } else { - if pages.Size != 0 { - glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data))) - } - return pages.flushAndSave(ctx, offset, data) - } - } else { - copy(pages.Data[offset-pages.Offset:], data) - } - - pages.Size = max(pages.Size, 
offset+int64(len(data))-pages.Offset) - return } -func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { - - var chunk *filer_pb.FileChunk +func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) { // flush existing - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } - pages.Size = 0 - pages.Offset = 0 + pages.saveExistingPagesToStorage() // flush the new page - if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } + pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))) return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() { + for pages.saveExistingLargestPageToStorage() { + } +} - pages.lock.Lock() - defer pages.lock.Unlock() +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedData bool) { - if pages.Size == 0 { - return nil, nil + maxList := pages.intervals.RemoveLargestIntervalLinkedList() + if maxList == nil { + return false } - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - pages.Size = 0 - pages.Offset = 0 - if chunk != nil { - glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - } + entry := pages.f.getEntry() + if entry == nil { + return false } - return -} -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { + fileSize := int64(entry.Attributes.FileSize) - if pages.Size == 0 { - return nil, nil + chunkSize := min(maxList.Size(), fileSize-maxList.Offset()) + if chunkSize == 0 { + return false } - return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) -} - -func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { + pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize) - var fileId, host string - var auth security.EncodedJwt + return true +} - if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) { - request := &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: pages.f.wfs.option.Replication, - Collection: pages.f.wfs.option.Collection, - TtlSec: pages.f.wfs.option.TtlSec, - DataCenter: pages.f.wfs.option.DataCenter, - } + mtime := time.Now().UnixNano() + pages.writeWaitGroup.Add(1) + writer := func() { + defer pages.writeWaitGroup.Done() - resp, err := client.AssignVolume(ctx, request) + reader = io.LimitReader(reader, size) + chunk, collection, replication, err := 
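// NOTE: uploads are now asynchronous. The logical write time is captured in
// mtime before the writer runs and assigned to the finished chunk, so
// addChunks can order chunks by write time even when uploads complete out of
// order; writeWaitGroup tracks in-flight writers, chunkAddLock serializes
// appending to the entry, and wfs.concurrentWriters (when configured) bounds
// the parallelism.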
pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset) if err != nil { - glog.V(0).Infof("assign volume failure %v: %v", request, err) - return err + glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err) + pages.lastErr = err + return } - - fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) - - return nil - }); err != nil { - return nil, fmt.Errorf("filerGrpcAddress assign volume: %v", err) + chunk.Mtime = mtime + pages.collection, pages.replication = collection, replication + pages.chunkAddLock.Lock() + defer pages.chunkAddLock.Unlock() + pages.f.addChunks([]*filer_pb.FileChunk{chunk}) + glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size) } - fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "", nil, auth) - if err != nil { - glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) - return nil, fmt.Errorf("upload data: %v", err) - } - if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err) - return nil, fmt.Errorf("upload result: %v", uploadResult.Error) + if pages.f.wfs.concurrentWriters != nil { + pages.f.wfs.concurrentWriters.Execute(writer) + } else { + go writer() } - - return &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, - }, nil - } func max(x, y int64) int64 { @@ -218,3 +121,13 @@ func max(x, y int64) int64 { } return y } +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func (pages *ContinuousDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) { + return pages.intervals.ReadDataAt(data, startOffset) +} diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go new file mode 100644 index 000000000..1404bf78c --- /dev/null +++ b/weed/filesys/dirty_page_interval.go @@ -0,0 +1,223 @@ +package filesys + +import ( + "bytes" + "io" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +type IntervalNode struct { + Data []byte + Offset int64 + Size int64 + Next *IntervalNode +} + +type IntervalLinkedList struct { + Head *IntervalNode + Tail *IntervalNode +} + +type ContinuousIntervals struct { + lists []*IntervalLinkedList +} + +func (list *IntervalLinkedList) Offset() int64 { + return list.Head.Offset +} +func (list *IntervalLinkedList) Size() int64 { + return list.Tail.Offset + list.Tail.Size - list.Head.Offset +} +func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { + // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + list.Tail.Next = node + list.Tail = node +} +func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { + // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + node.Next = list.Head + list.Head = node +} + +func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { + t := list.Head + for { + + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart < nodeStop { + // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d 
nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop) + copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) + } + + if t.Next == nil { + break + } + t = t.Next + } +} + +func (c *ContinuousIntervals) TotalSize() (total int64) { + for _, list := range c.lists { + total += list.Size() + } + return +} + +func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { + var nodes []*IntervalNode + for t := list.Head; t != nil; t = t.Next { + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart >= nodeStop { + // skip non overlapping IntervalNode + continue + } + nodes = append(nodes, &IntervalNode{ + Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], + Offset: nodeStart, + Size: nodeStop - nodeStart, + Next: nil, + }) + } + for i := 1; i < len(nodes); i++ { + nodes[i-1].Next = nodes[i] + } + return &IntervalLinkedList{ + Head: nodes[0], + Tail: nodes[len(nodes)-1], + } +} + +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { + + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + + // append to the tail and return + if len(c.lists) == 1 { + lastSpan := c.lists[0] + if lastSpan.Tail.Offset+lastSpan.Tail.Size == offset { + lastSpan.addNodeToTail(interval) + return + } + } + + var newLists []*IntervalLinkedList + for _, list := range c.lists { + // if list is to the left of new interval, add to the new list + if list.Tail.Offset+list.Tail.Size <= interval.Offset { + newLists = append(newLists, list) + } + // if list is to the right of new interval, add to the new list + if interval.Offset+interval.Size <= list.Head.Offset { + newLists = append(newLists, list) + } + // if new interval overwrite the right part of the list + if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size { + // create a new list of the left part of existing list + newLists = append(newLists, subList(list, list.Offset(), interval.Offset)) + } + // if new interval overwrite the left part of the list + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size { + // create a new list of the right part of existing list + newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size)) + } + // skip anything that is fully overwritten by the new interval + } + + c.lists = newLists + // add the new interval to the lists, connecting neighbor lists + var prevList, nextList *IntervalLinkedList + + for _, list := range c.lists { + if list.Head.Offset == interval.Offset+interval.Size { + nextList = list + break + } + } + + for _, list := range c.lists { + if list.Head.Offset+list.Size() == offset { + list.addNodeToTail(interval) + prevList = list + break + } + } + + if prevList != nil && nextList != nil { + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + prevList.Tail.Next = nextList.Head + prevList.Tail = nextList.Tail + c.removeList(nextList) + } else if nextList != nil { + // add to head was not done when checking + nextList.addNodeToHead(interval) + } + if prevList == nil && nextList == nil { + c.lists = append(c.lists, &IntervalLinkedList{ + Head: interval, + Tail: interval, + }) + } + + return +} + +func (c 
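// NOTE: AddInterval above first fast-paths a pure append, then rebuilds the
// list set: lists entirely left or right of the new data are kept, partially
// overwritten lists are trimmed with subList, fully covered ones are dropped,
// and the new node is stitched onto an adjacent predecessor/successor list
// (or starts its own). For example, writing [2,4) into an existing [0,5):
//
//	keep subList [0,2) and subList [4,5); append [2,4) to [0,2) -> [0,4);
//	[0,4) and [4,5) are then connected into a single list covering [0,5).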
*ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList { + var maxSize int64 + maxIndex := -1 + for k, list := range c.lists { + if maxSize <= list.Size() { + maxSize = list.Size() + maxIndex = k + } + } + if maxSize <= 0 { + return nil + } + + t := c.lists[maxIndex] + c.lists = append(c.lists[0:maxIndex], c.lists[maxIndex+1:]...) + return t + +} + +func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) { + index := -1 + for k, list := range c.lists { + if list.Offset() == target.Offset() { + index = k + } + } + if index < 0 { + return + } + + c.lists = append(c.lists[0:index], c.lists[index+1:]...) + +} + +func (c *ContinuousIntervals) ReadDataAt(data []byte, startOffset int64) (maxStop int64) { + for _, list := range c.lists { + start := max(startOffset, list.Offset()) + stop := min(startOffset+int64(len(data)), list.Offset()+list.Size()) + if start < stop { + list.ReadData(data[start-startOffset:], start, stop) + maxStop = max(maxStop, stop) + } + } + return +} + +func (l *IntervalLinkedList) ToReader() io.Reader { + var readers []io.Reader + t := l.Head + readers = append(readers, util.NewBytesReader(t.Data)) + for t.Next != nil { + t = t.Next + readers = append(readers, bytes.NewReader(t.Data)) + } + if len(readers) == 1 { + return readers[0] + } + return io.MultiReader(readers...) +} diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go new file mode 100644 index 000000000..d02ad27fd --- /dev/null +++ b/weed/filesys/dirty_page_interval_test.go @@ -0,0 +1,113 @@ +package filesys + +import ( + "bytes" + "math/rand" + "testing" +) + +func TestContinuousIntervals_AddIntervalAppend(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25 + c.AddInterval(getBytes(25, 3), 0) + // _, _, 23, 23, 23, 23 + c.AddInterval(getBytes(23, 4), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) + +} + +func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25, 25, 25 + c.AddInterval(getBytes(25, 5), 0) + // _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 25) + +} + +func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 1, + c.AddInterval(getBytes(1, 1), 0) + // _, 2, + c.AddInterval(getBytes(2, 1), 1) + // _, _, 3, 3, 3 + c.AddInterval(getBytes(3, 3), 2) + // _, _, _, 4, 4, 4 + c.AddInterval(getBytes(4, 3), 3) + + expectedData(t, c, 0, 1, 2, 3, 4, 4, 4) + +} + +func TestContinuousIntervals_RealCase1(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, + c.AddInterval(getBytes(25, 1), 0) + // _, _, _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 4) + // _, _, _, 24, 24, 24, 24 + c.AddInterval(getBytes(24, 4), 3) + + // _, 22, 22 + c.AddInterval(getBytes(22, 2), 1) + + expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24) + +} + +func TestRandomWrites(t *testing.T) { + + c := &ContinuousIntervals{} + + data := make([]byte, 1024) + + for i := 0; i < 1024; i++ { + + start, stop := rand.Intn(len(data)), rand.Intn(len(data)) + if start > stop { + start, stop = stop, start + } + + rand.Read(data[start : stop+1]) + + c.AddInterval(data[start:stop+1], int64(start)) + + expectedData(t, c, 0, data...) 
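// NOTE: this randomized test uses the flat data slice as its oracle: every
// AddInterval mirrors a write into data, and expectedData compares each linked
// list byte-for-byte against the matching oracle range. Since the intervals
// alias subslices of data, later overlapping writes mutate earlier intervals
// in place, but the comparison stays valid because oracle and intervals share
// the same backing array.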
+ + } + +} + +func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { + start, stop := int64(offset), int64(offset+len(data)) + for _, list := range c.lists { + nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size()) + if nodeStart < nodeStop { + buf := make([]byte, nodeStop-nodeStart) + list.ReadData(buf, nodeStart, nodeStop) + if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 { + t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf) + } + } + } +} + +func getBytes(content byte, length int) []byte { + data := make([]byte, length) + for i := 0; i < length; i++ { + data[i] = content + } + return data +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index afe78ee0f..bb57988cd 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -3,20 +3,22 @@ package filesys import ( "context" "os" - "path/filepath" "sort" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const blockSize = 512 var _ = fs.Node(&File{}) +var _ = fs.NodeIdentifier(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeSetattrer(&File{}) @@ -24,35 +26,56 @@ var _ = fs.NodeGetxattrer(&File{}) var _ = fs.NodeSetxattrer(&File{}) var _ = fs.NodeRemovexattrer(&File{}) var _ = fs.NodeListxattrer(&File{}) +var _ = fs.NodeForgetter(&File{}) type File struct { - Name string - dir *Dir - wfs *WFS - entry *filer_pb.Entry - entryViewCache []filer2.VisibleInterval - isOpen bool + Name string + dir *Dir + wfs *WFS + entry *filer_pb.Entry + isOpen int + dirtyMetadata bool + id uint64 +} + +func (file *File) fullpath() util.FullPath { + return util.NewFullPath(file.dir.FullPath(), file.Name) } -func (file *File) fullpath() string { - return filepath.Join(file.dir.Path, file.Name) +func (file *File) Id() uint64 { + return file.id } -func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { +func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) { - glog.V(4).Infof("file Attr %s", file.fullpath()) + glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - attr.Mode = os.FileMode(file.entry.Attributes.FileMode) - attr.Size = filer2.TotalSize(file.entry.Chunks) - attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) - attr.Gid = file.entry.Attributes.Gid - attr.Uid = file.entry.Attributes.Uid + if entry == nil { + return fuse.ENOENT + } + + attr.Inode = file.Id() + attr.Valid = time.Second + attr.Mode = os.FileMode(entry.Attributes.FileMode) + attr.Size = filer.FileSize(entry) + if file.isOpen > 0 { + attr.Size = entry.Attributes.FileSize + glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size) + } + attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) + attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) + attr.Gid = entry.Attributes.Gid + attr.Uid = entry.Attributes.Uid attr.Blocks = attr.Size/blockSize + 1 attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit) + if entry.HardLinkCounter > 0 { + attr.Nlink = 
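// NOTE: while a file is open, Attr reports entry.Attributes.FileSize, which
// Write keeps current, instead of the size derived from committed chunks, so
// readers observe bytes that still live only in dirty pages; Nlink is surfaced
// only for entries that participate in a hard link.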
uint32(entry.HardLinkCounter) + } return nil @@ -62,24 +85,23 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp glog.V(4).Infof("file Getxattr %s", file.fullpath()) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - return getxattr(file.entry, req, resp) + return getxattr(entry, req, resp) } func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { glog.V(4).Infof("file %v open %+v", file.fullpath(), req) - file.isOpen = true - handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid) resp.Handle = fuse.HandleID(handle.handle) - glog.V(3).Infof("%v file open handle id = %d", file.fullpath(), handle.handle) + glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle) return handle, nil @@ -87,48 +109,89 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := file.maybeLoadEntry(ctx); err != nil { + glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req) + + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } + if file.isOpen > 0 { + file.wfs.handlesLock.Lock() + fileHandle := file.wfs.handles[file.Id()] + file.wfs.handlesLock.Unlock() + + if fileHandle != nil { + fileHandle.Lock() + defer fileHandle.Unlock() + } + } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { - glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) - if req.Size == 0 { + glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(entry.Chunks)) + if req.Size < filer.FileSize(entry) { // fmt.Printf("truncate %v \n", fullPath) - file.entry.Chunks = nil - file.entryViewCache = nil + var chunks []*filer_pb.FileChunk + var truncatedChunks []*filer_pb.FileChunk + for _, chunk := range entry.Chunks { + int64Size := int64(chunk.Size) + if chunk.Offset+int64Size > int64(req.Size) { + // this chunk is truncated + int64Size = int64(req.Size) - chunk.Offset + if int64Size > 0 { + chunks = append(chunks, chunk) + glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size) + chunk.Size = uint64(int64Size) + } else { + glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString()) + truncatedChunks = append(truncatedChunks, chunk) + } + } + } + entry.Chunks = chunks } - file.entry.Attributes.FileSize = req.Size + entry.Attributes.FileSize = req.Size + file.dirtyMetadata = true } + if req.Valid.Mode() { - file.entry.Attributes.FileMode = uint32(req.Mode) + entry.Attributes.FileMode = uint32(req.Mode) + file.dirtyMetadata = true } if req.Valid.Uid() { - file.entry.Attributes.Uid = req.Uid + entry.Attributes.Uid = req.Uid + file.dirtyMetadata = true } if req.Valid.Gid() { - file.entry.Attributes.Gid = req.Gid + entry.Attributes.Gid = req.Gid + file.dirtyMetadata = true } if req.Valid.Crtime() { - file.entry.Attributes.Crtime = req.Crtime.Unix() + entry.Attributes.Crtime = req.Crtime.Unix() + file.dirtyMetadata = true } if req.Valid.Mtime() { - file.entry.Attributes.Mtime = req.Mtime.Unix() + entry.Attributes.Mtime = req.Mtime.Unix() + file.dirtyMetadata = true + } + + if req.Valid.Handle() { + // fmt.Printf("file handle => %d\n", req.Handle) } - if file.isOpen { + if file.isOpen > 0 { return nil } - 
file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) + if !file.dirtyMetadata { + return nil + } - return file.saveEntry(ctx) + return file.saveEntry(entry) } @@ -136,17 +199,16 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - if err := setxattr(file.entry, req); err != nil { + if err := setxattr(entry, req); err != nil { return err } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) - - return file.saveEntry(ctx) + return file.saveEntry(entry) } @@ -154,17 +216,16 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - if err := removexattr(file.entry, req); err != nil { + if err := removexattr(entry, req); err != nil { return err } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) - - return file.saveEntry(ctx) + return file.saveEntry(entry) } @@ -172,11 +233,12 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res glog.V(4).Infof("file Listxattr %s", file.fullpath()) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - if err := listxattr(file.entry, req, resp); err != nil { + if err := listxattr(entry, req, resp); err != nil { return err } @@ -187,69 +249,112 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { // fsync works at OS level // write the file chunks to the filerGrpcAddress - glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req) + glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req) return nil } -func (file *File) maybeLoadEntry(ctx context.Context) error { - if file.entry == nil || !file.isOpen { - entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) - if err != nil { - return err - } - if entry != nil { - file.setEntry(entry) +func (file *File) Forget() { + t := util.NewFullPath(file.dir.FullPath(), file.Name) + glog.V(4).Infof("Forget file %s", t) + file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode())) +} + +func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) { + + file.wfs.handlesLock.Lock() + handle, found := file.wfs.handles[file.Id()] + file.wfs.handlesLock.Unlock() + entry = file.entry + if found { + glog.V(4).Infof("maybeLoadEntry found opened file %s/%s: %v %v", file.dir.FullPath(), file.Name, handle.f.entry, entry) + entry = handle.f.entry + } + + if entry != nil { + if len(entry.HardLinkId) == 0 { + // only always reload hard link + return entry, nil } } - return nil + entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name) + if err != nil { + glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err) + return entry, err + } + if entry != nil { + // file.entry = entry + } else { + glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err) + } + return entry, nil } -func (file *File) addChunk(chunk *filer_pb.FileChunk) { - if chunk != nil { - file.addChunks([]*filer_pb.FileChunk{chunk}) +func lessThan(a, b 
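// NOTE: chunks are ordered by (Mtime, Fid.FileKey) so concurrent flushes land
// deterministically. One review flag in addChunks below: the loop keeps its
// result in earliestChunk but replaces it whenever
// lessThan(earliestChunk, newChunks[i]) holds, i.e. it actually selects the
// newest incoming chunk; matching the variable's name would require testing
// lessThan(newChunks[i], earliestChunk) instead.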
*filer_pb.FileChunk) bool { + if a.Mtime == b.Mtime { + return a.Fid.FileKey < b.Fid.FileKey } + return a.Mtime < b.Mtime } func (file *File) addChunks(chunks []*filer_pb.FileChunk) { - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].Mtime < chunks[j].Mtime - }) + // find the earliest incoming chunk + newChunks := chunks + earliestChunk := newChunks[0] + for i := 1; i < len(newChunks); i++ { + if lessThan(earliestChunk, newChunks[i]) { + earliestChunk = newChunks[i] + } + } - var newVisibles []filer2.VisibleInterval - for _, chunk := range chunks { - newVisibles = filer2.MergeIntoVisibles(file.entryViewCache, newVisibles, chunk) - t := file.entryViewCache[:0] - file.entryViewCache = newVisibles - newVisibles = t + entry := file.getEntry() + if entry == nil { + return } - glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks)) + // pick out-of-order chunks from existing chunks + for _, chunk := range entry.Chunks { + if lessThan(earliestChunk, chunk) { + chunks = append(chunks, chunk) + } + } - file.entry.Chunks = append(file.entry.Chunks, chunks...) -} + // sort incoming chunks + sort.Slice(chunks, func(i, j int) bool { + return lessThan(chunks[i], chunks[j]) + }) -func (file *File) setEntry(entry *filer_pb.Entry) { - file.entry = entry - file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) + glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks)) + + entry.Chunks = append(entry.Chunks, newChunks...) } -func (file *File) saveEntry(ctx context.Context) error { - return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (file *File) saveEntry(entry *filer_pb.Entry) error { + return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + file.wfs.mapPbIdFromLocalToFiler(entry) + defer file.wfs.mapPbIdFromFilerToLocal(entry) request := &filer_pb.UpdateEntryRequest{ - Directory: file.dir.Path, - Entry: file.entry, + Directory: file.dir.FullPath(), + Entry: entry, + Signatures: []int32{file.wfs.signature}, } - glog.V(1).Infof("save file entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + glog.V(4).Infof("save file entry: %v", request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) + glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err) return fuse.EIO } + file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + return nil }) } + +func (file *File) getEntry() *filer_pb.Entry { + return file.entry +} diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 101f5c056..27ffab6e1 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -3,39 +3,51 @@ package filesys import ( "context" "fmt" - "mime" - "path" + "io" + "math" + "net/http" + "os" + "sync" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type FileHandle struct { // cache file has been written to - dirtyPages *ContinuousDirtyPages - contentType string - dirtyMetadata bool - 
handle uint64 + dirtyPages *ContinuousDirtyPages + entryViewCache []filer.VisibleInterval + reader io.ReaderAt + contentType string + handle uint64 + sync.Mutex f *File RequestId fuse.RequestID // unique ID for request NodeId fuse.NodeID // file or directory the request is about Uid uint32 // user ID of process making request Gid uint32 // group ID of process making request + } func newFileHandle(file *File, uid, gid uint32) *FileHandle { - return &FileHandle{ + fh := &FileHandle{ f: file, dirtyPages: newDirtyPages(file), Uid: uid, Gid: gid, } + entry := fh.f.getEntry() + if entry != nil { + entry.Attributes.FileSize = filer.FileSize(entry) + } + + return fh } var _ = fs.Handle(&FileHandle{}) @@ -48,134 +60,263 @@ var _ = fs.HandleReleaser(&FileHandle{}) func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size)) + glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data)) + fh.Lock() + defer fh.Unlock() - // this value should come from the filer instead of the old f - if len(fh.f.entry.Chunks) == 0 { - glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) + if req.Size <= 0 { return nil } - buff := make([]byte, req.Size) - - if fh.f.entryViewCache == nil { - fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) + buff := resp.Data[:cap(resp.Data)] + if req.Size > cap(resp.Data) { + // should not happen + buff = make([]byte, req.Size) } - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - - totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset) + totalRead, err := fh.readFromChunks(buff, req.Offset) + if err == nil || err == io.EOF { + maxStop := fh.readFromDirtyPages(buff, req.Offset) + totalRead = max(maxStop-req.Offset, totalRead) + } - resp.Data = buff[:totalRead] + if err == io.EOF { + err = nil + } if err != nil { - glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err) + return fuse.EIO + } + + if totalRead > int64(len(buff)) { + glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead) + totalRead = min(int64(len(buff)), totalRead) + } + if err == nil { + resp.Data = buff[:totalRead] } return err } -// Write to the file handle -func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) { + maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset) + return +} - // write the request to volume servers +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { - glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data))) + entry := fh.f.getEntry() + if entry == nil { + return 0, io.EOF + } - chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) - if err != nil { - glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) - return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, 
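The Read path above first reuses the response buffer the FUSE library already allocated (the spare capacity of resp.Data) and only falls back to a fresh allocation when the request is unexpectedly larger. A small sketch of that reuse pattern, independent of FUSE:

package main

import "fmt"

// reuseOrAlloc returns a buffer of exactly size bytes, reusing dst's
// spare capacity when possible to avoid a per-read allocation.
func reuseOrAlloc(dst []byte, size int) []byte {
	if size <= cap(dst) {
		return dst[:size]
	}
	return make([]byte, size) // the "should not happen" path in the code above
}

func main() {
	respData := make([]byte, 0, 4096) // e.g. a FUSE-provided buffer with capacity 4096
	buff := reuseOrAlloc(respData, 1024)
	fmt.Println(len(buff), cap(buff)) // 1024 4096: no new allocation
}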
req.Offset+int64(len(req.Data)), err) + fileSize := int64(filer.FileSize(entry)) + fileFullPath := fh.f.fullpath() + + if fileSize == 0 { + glog.V(1).Infof("empty fh %v", fileFullPath) + return 0, io.EOF } - resp.Size = len(req.Data) + if offset+int64(len(buff)) <= int64(len(entry.Content)) { + totalRead := copy(buff, entry.Content[offset:]) + glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) + return int64(totalRead), nil + } - if req.Offset == 0 { - // detect mime type - detectedMIME := mimetype.Detect(req.Data) - fh.contentType = detectedMIME.String() - if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() { - fh.contentType = mime.TypeByExtension(ext) + var chunkResolveErr error + if fh.entryViewCache == nil { + fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks) + if chunkResolveErr != nil { + return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr) } + fh.reader = nil + } + + reader := fh.reader + if reader == nil { + chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64) + reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize) + } + fh.reader = reader - fh.dirtyMetadata = true + totalRead, err := reader.ReadAt(buff, offset) + + if err != nil && err != io.EOF { + glog.Errorf("file handle read %s: %v", fileFullPath, err) } - if len(chunks) > 0 { + glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) + + return int64(totalRead), err +} - fh.f.addChunks(chunks) +// Write to the file handle +func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { + + fh.Lock() + defer fh.Unlock() + + // write the request to volume servers + data := req.Data + if len(data) <= 512 { + // fuse message cacheable size + data = make([]byte, len(req.Data)) + copy(data, req.Data) + } - fh.dirtyMetadata = true + entry := fh.f.getEntry() + if entry == nil { + return fuse.EIO } + entry.Content = nil + entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(entry.Attributes.FileSize))) + glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) + + fh.dirtyPages.AddPage(req.Offset, data) + + resp.Size = len(data) + + if req.Offset == 0 { + // detect mime type + fh.contentType = http.DetectContentType(data) + fh.f.dirtyMetadata = true + } + + fh.f.dirtyMetadata = true + return nil } func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error { - glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle) + glog.V(4).Infof("Release %v fh %d open=%d", fh.f.fullpath(), fh.handle, fh.f.isOpen) + + fh.Lock() + defer fh.Unlock() - fh.dirtyPages.releaseResource() + fh.f.isOpen-- - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + if fh.f.isOpen <= 0 { + fh.f.entry = nil + fh.entryViewCache = nil + fh.reader = nil - fh.f.isOpen = false + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } + + if fh.f.isOpen < 0 { + glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0) + fh.f.isOpen = 0 + return nil + } return nil } func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { - // fflush works at fh level - // send the data to the OS - glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req) 
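A read above is served from two sources: committed chunks first, then the still-unflushed dirty pages, whose bytes overwrite their portion of the buffer; the returned length covers whichever source reached further (max(maxStop-req.Offset, totalRead)). A toy model of that merge, where readChunks and readDirty are hypothetical stand-ins for readFromChunks and ReadDirtyDataAt:

package main

import "fmt"

func max(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

// mergeRead models FileHandle.Read: committed data first, then newer
// dirty pages on top, and the larger extent wins.
func mergeRead(buff []byte, offset int64,
	readChunks func([]byte, int64) int64, // returns bytes read
	readDirty func([]byte, int64) int64, // returns the max stop offset
) int64 {
	totalRead := readChunks(buff, offset)
	maxStop := readDirty(buff, offset)
	return max(maxStop-offset, totalRead)
}

func main() {
	buff := make([]byte, 8)
	n := mergeRead(buff, 100,
		func(b []byte, off int64) int64 { copy(b, "chunkkkk"); return 8 },
		func(b []byte, off int64) int64 { copy(b[:5], "dirty"); return 105 },
	)
	fmt.Println(n, string(buff[:n])) // 8 dirtykkk
}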
- chunk, err := fh.dirtyPages.FlushToStorage(ctx) - if err != nil { - glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + glog.V(4).Infof("Flush %v fh %d", fh.f.fullpath(), fh.handle) + + fh.Lock() + defer fh.Unlock() + + if err := fh.doFlush(ctx, req.Header); err != nil { + glog.Errorf("Flush doFlush %s: %v", fh.f.Name, err) + return err } - fh.f.addChunk(chunk) + glog.V(4).Infof("Flush %v fh %d success", fh.f.fullpath(), fh.handle) + return nil +} + +func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error { + // flush works at fh level + // send the data to the OS + glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle) + + fh.dirtyPages.saveExistingPagesToStorage() + + fh.dirtyPages.writeWaitGroup.Wait() - if !fh.dirtyMetadata { + if fh.dirtyPages.lastErr != nil { + glog.Errorf("%v doFlush last err: %v", fh.f.fullpath(), fh.dirtyPages.lastErr) + return fuse.EIO + } + + if !fh.f.dirtyMetadata { return nil } - return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + entry := fh.f.getEntry() + if entry == nil { + return nil + } - if fh.f.entry.Attributes != nil { - fh.f.entry.Attributes.Mime = fh.contentType - fh.f.entry.Attributes.Uid = req.Uid - fh.f.entry.Attributes.Gid = req.Gid - fh.f.entry.Attributes.Mtime = time.Now().Unix() - fh.f.entry.Attributes.Crtime = time.Now().Unix() - fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask) + if entry.Attributes != nil { + entry.Attributes.Mime = fh.contentType + if entry.Attributes.Uid == 0 { + entry.Attributes.Uid = header.Uid + } + if entry.Attributes.Gid == 0 { + entry.Attributes.Gid = header.Gid + } + if entry.Attributes.Crtime == 0 { + entry.Attributes.Crtime = time.Now().Unix() + } + entry.Attributes.Mtime = time.Now().Unix() + entry.Attributes.FileMode = uint32(os.FileMode(entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask) + entry.Attributes.Collection = fh.dirtyPages.collection + entry.Attributes.Replication = fh.dirtyPages.replication } request := &filer_pb.CreateEntryRequest{ - Directory: fh.f.dir.Path, - Entry: fh.f.entry, + Directory: fh.f.dir.FullPath(), + Entry: entry, + Signatures: []int32{fh.f.wfs.signature}, } - glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) - for i, chunk := range fh.f.entry.Chunks { - glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(entry.Chunks)) + for i, chunk := range entry.Chunks { + glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) } - chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) - fh.f.entry.Chunks = chunks - // fh.f.entryViewCache = nil + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.Errorf("update fh: %v", err) - return fmt.Errorf("update fh: %v", err) + chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks) + chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks) + if manifestErr != nil { + // not good, but should be ok + glog.V(0).Infof("MaybeManifestize: %v", manifestErr) } + entry.Chunks = 
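Release above decrements a per-file open count and tears shared state down only when the last opener goes away, with a defensive clamp if the count ever drops below zero. The counting logic in isolation (file here is a simplified stand-in):

package main

import "fmt"

type file struct{ isOpen int }

// release models FileHandle.Release: drop one reference, free shared
// resources at zero, and reset any negative count.
func release(f *file, freeResources func()) {
	f.isOpen--
	if f.isOpen <= 0 {
		freeResources()
	}
	if f.isOpen < 0 {
		f.isOpen = 0
	}
}

func main() {
	f := &file{isOpen: 2}
	release(f, func() { fmt.Println("freed") })
	release(f, func() { fmt.Println("freed") }) // "freed" is printed on this call only
	fmt.Println(f.isOpen)                       // 0
}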
append(chunks, manifestChunks...) - fh.f.wfs.deleteFileChunks(ctx, garbages) - for i, chunk := range garbages { - glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) + return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) } + fh.f.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + return nil }) + + if err == nil { + fh.f.dirtyMetadata = false + } + + if err != nil { + glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err) + return fuse.EIO + } + + return nil } diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go new file mode 100644 index 000000000..6b1012090 --- /dev/null +++ b/weed/filesys/fscache.go @@ -0,0 +1,213 @@ +package filesys + +import ( + "sync" + + "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +type FsCache struct { + root *FsNode + sync.RWMutex +} +type FsNode struct { + parent *FsNode + node fs.Node + name string + childrenLock sync.RWMutex + children map[string]*FsNode +} + +func newFsCache(root fs.Node) *FsCache { + return &FsCache{ + root: &FsNode{ + node: root, + }, + } +} + +func (c *FsCache) GetFsNode(path util.FullPath) fs.Node { + + c.RLock() + defer c.RUnlock() + + return c.doGetFsNode(path) +} + +func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node { + t := c.root + for _, p := range path.Split() { + t = t.findChild(p) + if t == nil { + return nil + } + } + return t.node +} + +func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) { + + c.Lock() + defer c.Unlock() + + c.doSetFsNode(path, node) +} + +func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) { + t := c.root + for _, p := range path.Split() { + t = t.ensureChild(p) + } + t.node = node +} + +func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node { + + c.Lock() + defer c.Unlock() + + t := c.doGetFsNode(path) + if t != nil { + return t + } + t = genNodeFn() + c.doSetFsNode(path, t) + return t +} + +func (c *FsCache) DeleteFsNode(path util.FullPath) { + + c.Lock() + defer c.Unlock() + + t := c.root + for _, p := range path.Split() { + t = t.findChild(p) + if t == nil { + return + } + } + if t.parent != nil { + t.parent.disconnectChild(t) + } + t.deleteSelf() +} + +// oldPath and newPath are full path including the new name +func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode { + + c.Lock() + defer c.Unlock() + + // find old node + src := c.root + for _, p := range oldPath.Split() { + src = src.findChild(p) + if src == nil { + return src + } + } + if src.parent != nil { + src.parent.disconnectChild(src) + } + + // find new node + target := c.root + for _, p := range newPath.Split() { + target = target.ensureChild(p) + } + parent := target.parent + if dir, ok := src.node.(*Dir); ok { + dir.name = target.name // target is not Dir, but a shortcut + } + if f, ok := src.node.(*File); ok { + f.Name = target.name + entry := f.getEntry() + if entry != nil { + entry.Name = f.Name + } + } + parent.disconnectChild(target) + + target.deleteSelf() + + src.name = target.name + src.connectToParent(parent) + + return src +} + +func (n *FsNode) connectToParent(parent *FsNode) { + n.parent = parent + oldNode := 
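The new FsCache above is a path trie: one map per directory level, walked component by component for get, set, delete, and move. A self-contained sketch of that structure, without the locking and fs.Node specifics:

package main

import (
	"fmt"
	"strings"
)

type node struct {
	value    string
	children map[string]*node
}

// ensure returns the child for name, creating it if needed,
// mirroring FsNode.ensureChild.
func (n *node) ensure(name string) *node {
	if n.children == nil {
		n.children = make(map[string]*node)
	}
	c, ok := n.children[name]
	if !ok {
		c = &node{}
		n.children[name] = c
	}
	return c
}

// set walks /-separated components, creating intermediate nodes,
// mirroring FsCache.doSetFsNode.
func set(root *node, path, value string) {
	t := root
	for _, p := range strings.Split(strings.Trim(path, "/"), "/") {
		t = t.ensure(p)
	}
	t.value = value
}

func get(root *node, path string) string {
	t := root
	for _, p := range strings.Split(strings.Trim(path, "/"), "/") {
		t = t.children[p]
		if t == nil {
			return ""
		}
	}
	return t.value
}

func main() {
	root := &node{}
	set(root, "/a/b/c", "cc")
	fmt.Println(get(root, "/a/b/c")) // cc
	fmt.Println(get(root, "/a/x"))   // "" (missing)
}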
parent.findChild(n.name) + if oldNode != nil { + oldNode.deleteSelf() + } + if dir, ok := n.node.(*Dir); ok { + if parent.node != nil { + dir.parent = parent.node.(*Dir) + } + } + if f, ok := n.node.(*File); ok { + if parent.node != nil { + f.dir = parent.node.(*Dir) + } + } + parent.childrenLock.Lock() + parent.children[n.name] = n + parent.childrenLock.Unlock() +} + +func (n *FsNode) findChild(name string) *FsNode { + n.childrenLock.RLock() + defer n.childrenLock.RUnlock() + + child, found := n.children[name] + if found { + return child + } + return nil +} + +func (n *FsNode) ensureChild(name string) *FsNode { + n.childrenLock.Lock() + defer n.childrenLock.Unlock() + + if n.children == nil { + n.children = make(map[string]*FsNode) + } + child, found := n.children[name] + if found { + return child + } + t := &FsNode{ + parent: n, + node: nil, + name: name, + children: nil, + } + n.children[name] = t + return t +} + +func (n *FsNode) disconnectChild(child *FsNode) { + n.childrenLock.Lock() + delete(n.children, child.name) + n.childrenLock.Unlock() + child.parent = nil +} + +func (n *FsNode) deleteSelf() { + n.childrenLock.Lock() + for _, child := range n.children { + child.deleteSelf() + } + n.children = nil + n.childrenLock.Unlock() + + n.node = nil + n.parent = nil + +} diff --git a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go new file mode 100644 index 000000000..1152eb32e --- /dev/null +++ b/weed/filesys/fscache_test.go @@ -0,0 +1,115 @@ +package filesys + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestPathSplit(t *testing.T) { + parts := util.FullPath("/").Split() + if len(parts) != 0 { + t.Errorf("expecting an empty list, but getting %d", len(parts)) + } + + parts = util.FullPath("/readme.md").Split() + if len(parts) != 1 { + t.Errorf("expecting a single-item list, but getting %d", len(parts)) + } + +} + +func TestFsCache(t *testing.T) { + + cache := newFsCache(nil) + + x := cache.GetFsNode(util.FullPath("/y/x")) + if x != nil { + t.Errorf("wrong node!") + } + + p := util.FullPath("/a/b/c") + cache.SetFsNode(p, &File{Name: "cc"}) + tNode := cache.GetFsNode(p) + tFile := tNode.(*File) + if tFile.Name != "cc" { + t.Errorf("expecting a FsNode") + } + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"}) + cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"}) + cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"}) + + b := cache.GetFsNode(util.FullPath("/a/b")) + if b != nil { + t.Errorf("unexpected node!") + } + + a := cache.GetFsNode(util.FullPath("/a")) + if a == nil { + t.Errorf("missing node!") + } + + cache.DeleteFsNode(util.FullPath("/a")) + if b != nil { + t.Errorf("unexpected node!") + } + + a = cache.GetFsNode(util.FullPath("/a")) + if a != nil { + t.Errorf("wrong DeleteFsNode!") + } + + z := cache.GetFsNode(util.FullPath("/z")) + if z == nil { + t.Errorf("missing node!") + } + + y := cache.GetFsNode(util.FullPath("/x/y")) + if y != nil { + t.Errorf("wrong node!") + } + +} + +func TestFsCacheMove(t *testing.T) { + + cache := newFsCache(nil) + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"}) + cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"}) + + cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x")) + + d := cache.GetFsNode(util.FullPath("/z/x/d")) + if d == nil {
t.Errorf("unexpected nil node!") + } + if d.(*File).Name != "dd" { + t.Errorf("unexpected non dd node!") + } + +} + +func TestFsCacheMove2(t *testing.T) { + + cache := newFsCache(nil) + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + + cache.Move(util.FullPath("/a/b/d"), util.FullPath("/a/b/e")) + + d := cache.GetFsNode(util.FullPath("/a/b/e")) + if d == nil { + t.Errorf("unexpected nil node!") + } + if d.(*File).Name != "e" { + t.Errorf("unexpected node!") + } + +} diff --git a/weed/filesys/meta_cache/cache_config.go b/weed/filesys/meta_cache/cache_config.go new file mode 100644 index 000000000..e6593ebde --- /dev/null +++ b/weed/filesys/meta_cache/cache_config.go @@ -0,0 +1,32 @@ +package meta_cache + +import "github.com/chrislusf/seaweedfs/weed/util" + +var ( + _ = util.Configuration(&cacheConfig{}) +) + +// implementing util.Configuraion +type cacheConfig struct { + dir string +} + +func (c cacheConfig) GetString(key string) string { + return c.dir +} + +func (c cacheConfig) GetBool(key string) bool { + panic("implement me") +} + +func (c cacheConfig) GetInt(key string) int { + panic("implement me") +} + +func (c cacheConfig) GetStringSlice(key string) []string { + panic("implement me") +} + +func (c cacheConfig) SetDefault(key string, value interface{}) { + panic("implement me") +} diff --git a/weed/filesys/meta_cache/id_mapper.go b/weed/filesys/meta_cache/id_mapper.go new file mode 100644 index 000000000..4a2179f31 --- /dev/null +++ b/weed/filesys/meta_cache/id_mapper.go @@ -0,0 +1,101 @@ +package meta_cache + +import ( + "fmt" + "strconv" + "strings" +) + +type UidGidMapper struct { + uidMapper *IdMapper + gidMapper *IdMapper +} + +type IdMapper struct { + localToFiler map[uint32]uint32 + filerToLocal map[uint32]uint32 +} + +// UidGidMapper translates local uid/gid to filer uid/gid +// The local storage always persists the same as the filer. +// The local->filer translation happens when updating the filer first and later saving to meta_cache. +// And filer->local happens when reading from the meta_cache. 
+func NewUidGidMapper(uidPairsStr, gidPairStr string) (*UidGidMapper, error) { + uidMapper, err := newIdMapper(uidPairsStr) + if err != nil { + return nil, err + } + gidMapper, err := newIdMapper(gidPairStr) + if err != nil { + return nil, err + } + + return &UidGidMapper{ + uidMapper: uidMapper, + gidMapper: gidMapper, + }, nil +} + +func (m *UidGidMapper) LocalToFiler(uid, gid uint32) (uint32, uint32) { + return m.uidMapper.LocalToFiler(uid), m.gidMapper.LocalToFiler(gid) +} +func (m *UidGidMapper) FilerToLocal(uid, gid uint32) (uint32, uint32) { + return m.uidMapper.FilerToLocal(uid), m.gidMapper.FilerToLocal(gid) +} + +func (m *IdMapper) LocalToFiler(id uint32) uint32 { + value, found := m.localToFiler[id] + if found { + return value + } + return id +} +func (m *IdMapper) FilerToLocal(id uint32) uint32 { + value, found := m.filerToLocal[id] + if found { + return value + } + return id +} + +func newIdMapper(pairsStr string) (*IdMapper, error) { + + localToFiler, filerToLocal, err := parseUint32Pairs(pairsStr) + if err != nil { + return nil, err + } + + return &IdMapper{ + localToFiler: localToFiler, + filerToLocal: filerToLocal, + }, nil + +} + +func parseUint32Pairs(pairsStr string) (localToFiler, filerToLocal map[uint32]uint32, err error) { + + if pairsStr == "" { + return + } + + localToFiler = make(map[uint32]uint32) + filerToLocal = make(map[uint32]uint32) + for _, pairStr := range strings.Split(pairsStr, ",") { + pair := strings.Split(pairStr, ":") + localUidStr, filerUidStr := pair[0], pair[1] + localUid, localUidErr := strconv.Atoi(localUidStr) + if localUidErr != nil { + err = fmt.Errorf("failed to parse local %s: %v", localUidStr, localUidErr) + return + } + filerUid, filerUidErr := strconv.Atoi(filerUidStr) + if filerUidErr != nil { + err = fmt.Errorf("failed to parse remote %s: %v", filerUidStr, filerUidErr) + return + } + localToFiler[uint32(localUid)] = uint32(filerUid) + filerToLocal[uint32(filerUid)] = uint32(localUid) + } + + return +} diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go new file mode 100644 index 000000000..b9d4724c9 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache.go @@ -0,0 +1,152 @@ +package meta_cache + +import ( + "context" + "fmt" + "os" + "strings" + "sync" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/bounded_tree" +) + +// need to have logic similar to FilerStoreWrapper +// e.g. 
fill fileId field for chunks + +type MetaCache struct { + localStore filer.VirtualFilerStore + sync.RWMutex + visitedBoundary *bounded_tree.BoundedTree + uidGidMapper *UidGidMapper + invalidateFunc func(util.FullPath) +} + +func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMapper, invalidateFunc func(util.FullPath)) *MetaCache { + return &MetaCache{ + localStore: openMetaStore(dbFolder), + visitedBoundary: bounded_tree.NewBoundedTree(baseDir), + uidGidMapper: uidGidMapper, + invalidateFunc: func(fullpath util.FullPath) { + if baseDir != "/" && strings.HasPrefix(string(fullpath), string(baseDir)) { + fullpath = fullpath[len(baseDir):] + } + invalidateFunc(fullpath) + }, + } +} + +func openMetaStore(dbFolder string) filer.VirtualFilerStore { + + os.RemoveAll(dbFolder) + os.MkdirAll(dbFolder, 0755) + + store := &leveldb.LevelDBStore{} + config := &cacheConfig{ + dir: dbFolder, + } + + if err := store.Initialize(config, ""); err != nil { + glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err) + } + + return filer.NewFilerStoreWrapper(store) + +} + +func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error { + mc.Lock() + defer mc.Unlock() + return mc.doInsertEntry(ctx, entry) +} + +func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error { + return mc.localStore.InsertEntry(ctx, entry) +} + +func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error { + mc.Lock() + defer mc.Unlock() + + oldDir, _ := oldPath.DirAndName() + if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) { + if oldPath != "" { + if newEntry != nil && oldPath == newEntry.FullPath { + // skip the unnecessary deletion + // leave the update to the following InsertEntry operation + } else { + glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name()) + if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil { + return err + } + } + } + } else { + // println("unknown old directory:", oldDir) + } + + if newEntry != nil { + newDir, _ := newEntry.DirAndName() + if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) { + glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name()) + if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil { + return err + } + } + } + return nil +} + +func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error { + mc.Lock() + defer mc.Unlock() + return mc.localStore.UpdateEntry(ctx, entry) +} + +func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) { + mc.RLock() + defer mc.RUnlock() + entry, err = mc.localStore.FindEntry(ctx, fp) + if err != nil { + return nil, err + } + mc.mapIdFromFilerToLocal(entry) + return +} + +func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + mc.Lock() + defer mc.Unlock() + return mc.localStore.DeleteEntry(ctx, fp) +} + +func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error { + mc.RLock() + defer mc.RUnlock() + + if !mc.visitedBoundary.HasVisited(dirPath) { + return fmt.Errorf("unsynchronized dir: %v", dirPath) + } + + _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool { + mc.mapIdFromFilerToLocal(entry) + return eachEntryFunc(entry) + }) + if err != nil { + return err + } + 
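AtomicUpdateEntryFromFiler above applies a metadata event as delete-old-path plus insert-new-entry under a single lock, skipping the delete when the old and new paths are identical (the visited-boundary checks are elided here). The decision logic over a plain map:

package main

import "fmt"

// applyEvent mirrors the delete/insert choice in AtomicUpdateEntryFromFiler:
// an event may carry an old path (delete or rename source), a new entry, or both.
func applyEvent(store map[string]string, oldPath, newPath, newValue string) {
	if oldPath != "" && oldPath != newPath {
		delete(store, oldPath) // a real rename or delete: drop the source
	}
	if newPath != "" {
		store[newPath] = newValue // create, or in-place update when paths match
	}
}

func main() {
	store := map[string]string{"/a/old.txt": "v1"}
	applyEvent(store, "/a/old.txt", "/a/new.txt", "v1") // rename
	applyEvent(store, "/a/new.txt", "/a/new.txt", "v2") // in-place update
	fmt.Println(store)                                  // map[/a/new.txt:v2]
}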
return err +} + +func (mc *MetaCache) Shutdown() { + mc.Lock() + defer mc.Unlock() + mc.localStore.Shutdown() +} + +func (mc *MetaCache) mapIdFromFilerToLocal(entry *filer.Entry) { + entry.Attr.Uid, entry.Attr.Gid = mc.uidGidMapper.FilerToLocal(entry.Attr.Uid, entry.Attr.Gid) +} diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go new file mode 100644 index 000000000..1ca3b16d5 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache_init.go @@ -0,0 +1,47 @@ +package meta_cache + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) error { + + return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) { + + glog.V(4).Infof("ReadDirAllEntries %s ...", path) + + util.Retry("ReadDirAllEntries", func() error { + err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error { + entry := filer.FromPbEntry(string(dirPath), pbEntry) + if IsHiddenSystemEntry(string(dirPath), entry.Name()) { + return nil + } + if err := mc.doInsertEntry(context.Background(), entry); err != nil { + glog.V(0).Infof("read %s: %v", entry.FullPath, err) + return err + } + if entry.IsDirectory() { + childDirectories = append(childDirectories, entry.Name()) + } + return nil + }) + return err + }) + + if err != nil { + err = fmt.Errorf("list %s: %v", dirPath, err) + } + + return + }) +} + +func IsHiddenSystemEntry(dir, name string) bool { + return dir == "/" && name == "topics" +} diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go new file mode 100644 index 000000000..f9973f436 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache_subscribe.go @@ -0,0 +1,86 @@ +package meta_cache + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error { + + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + + for _, sig := range message.Signatures { + if sig == selfSignature && selfSignature != 0 { + return nil + } + } + + dir := resp.Directory + var oldPath util.FullPath + var newEntry *filer.Entry + if message.OldEntry != nil { + oldPath = util.NewFullPath(dir, message.OldEntry.Name) + glog.V(4).Infof("deleting %v", oldPath) + } + + if message.NewEntry != nil { + if message.NewParentPath != "" { + dir = message.NewParentPath + } + key := util.NewFullPath(dir, message.NewEntry.Name) + glog.V(4).Infof("creating %v", key) + newEntry = filer.FromPbEntry(dir, message.NewEntry) + } + err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry) + if err == nil && message.OldEntry != nil && message.NewEntry != nil { + key := util.NewFullPath(dir, message.NewEntry.Name) + mc.invalidateFunc(key) + } + + return err + + } + + for { + err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := 
client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "mount", + PathPrefix: dir, + SinceNs: lastTsNs, + Signature: selfSignature, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + glog.Fatalf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + } + }) + if err != nil { + glog.Errorf("subscribing filer meta change: %v", err) + } + time.Sleep(time.Second) + } +} diff --git a/weed/filesys/unimplemented.go b/weed/filesys/unimplemented.go new file mode 100644 index 000000000..5c2dcf0e1 --- /dev/null +++ b/weed/filesys/unimplemented.go @@ -0,0 +1,22 @@ +package filesys + +import ( + "context" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" +) + +// https://github.com/bazil/fuse/issues/130 + +var _ = fs.NodeAccesser(&Dir{}) + +func (dir *Dir) Access(ctx context.Context, req *fuse.AccessRequest) error { + return fuse.ENOSYS +} + +var _ = fs.NodeAccesser(&File{}) + +func (file *File) Access(ctx context.Context, req *fuse.AccessRequest) error { + return fuse.ENOSYS +} diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index e924783ec..42816d23d 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -3,32 +3,44 @@ package filesys import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/wdclient" "math" "os" + "path" "sync" "time" - "github.com/karlseguin/ccache" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" ) type Option struct { + MountDirectory string + FilerAddress string FilerGrpcAddress string GrpcDialOption grpc.DialOption FilerMountRootPath string Collection string Replication string TtlSec int32 + DiskType types.DiskType ChunkSizeLimit int64 + ConcurrentWriters int + CacheDir string + CacheSizeMB int64 DataCenter string - DirListCacheLimit int64 - EntryCacheTtl time.Duration Umask os.FileMode MountUid uint32 @@ -36,22 +48,36 @@ type Option struct { MountMode os.FileMode MountCtime time.Time MountMtime time.Time + + VolumeServerAccess string // how to access volume servers + Cipher bool // whether encrypt data on volume server + UidGidMapper *meta_cache.UidGidMapper } var _ = fs.FS(&WFS{}) var _ = fs.FSStatfser(&WFS{}) type WFS struct { - option *Option - listDirectoryEntriesCache *ccache.Cache + option *Option + + // contains all open handles, protected by handlesLock + handlesLock sync.Mutex + handles map[uint64]*FileHandle - // contains all open handles - handles []*FileHandle - pathToHandleIndex map[string]int - pathToHandleLock sync.Mutex - bufPool sync.Pool + bufPool sync.Pool stats statsCache + + root fs.Node + fsNodeCache *FsCache + + chunkCache *chunk_cache.TieredChunkCache + metaCache *meta_cache.MetaCache + signature int32 + + // throttle writers + concurrentWriters *util.LimitedConcurrentExecutor + Server *fs.Server } type statsCache struct { filer_pb.StatisticsResponse @@ -60,72 +86,92 @@ type statsCache struct 
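SubscribeMetaEvents above runs an endless reconnect loop: stream events, apply each one, remember the last applied timestamp, and after any stream error sleep one second and resubscribe from that timestamp so nothing is skipped. A compile-only skeleton of the pattern; subscribe and apply are hypothetical callbacks, and the real code treats an apply failure as fatal:

package main

import (
	"fmt"
	"io"
	"time"
)

type event struct{ TsNs int64 }

// followEvents models the SubscribeMetaEvents loop: always resume from
// lastTsNs so a reconnect neither skips nor replays events.
func followEvents(subscribe func(sinceNs int64) (func() (event, error), error), apply func(event) error, lastTsNs int64) {
	for {
		recv, err := subscribe(lastTsNs)
		for err == nil {
			var ev event
			ev, err = recv()
			if err == io.EOF {
				err = nil
				break
			}
			if err != nil {
				break
			}
			if applyErr := apply(ev); applyErr != nil {
				panic(applyErr) // mirrors the glog.Fatalf in the code above
			}
			lastTsNs = ev.TsNs
		}
		if err != nil {
			fmt.Println("subscribing filer meta change:", err)
		}
		time.Sleep(time.Second)
	}
}

func main() {} // followEvents runs forever against a live stream, so it is not invoked here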
{ func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ - option: option, - listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), - pathToHandleIndex: make(map[string]int), + option: option, + handles: make(map[uint64]*FileHandle), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) }, }, + signature: util.RandomInt32(), + } + cacheUniqueId := util.Md5String([]byte(option.MountDirectory + option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:8] + cacheDir := path.Join(option.CacheDir, cacheUniqueId) + if option.CacheSizeMB > 0 { + os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask) + wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024) } - return wfs -} + wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper, func(filePath util.FullPath) { -func (wfs *WFS) Root() (fs.Node, error) { - return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil -} + fsNode := NodeWithId(filePath.AsInode()) + if err := wfs.Server.InvalidateNodeData(fsNode); err != nil { + glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err) + } -func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { + dir, name := filePath.DirAndName() + parent := NodeWithId(util.FullPath(dir).AsInode()) + if err := wfs.Server.InvalidateEntry(parent, name); err != nil { + glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err) + } + }) + startTime := time.Now() + go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano()) + grace.OnInterrupt(func() { + wfs.metaCache.Shutdown() + }) + + wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs} + wfs.fsNodeCache = newFsCache(wfs.root) + + if wfs.option.ConcurrentWriters > 0 { + wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters) + } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + return wfs +} +func (wfs *WFS) Root() (fs.Node, error) { + return wfs.root, nil } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() fullpath := file.fullpath() + glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid) + + inodeId := file.Id() - index, found := wfs.pathToHandleIndex[fullpath] - if found && wfs.handles[index] != nil { - glog.V(2).Infoln(fullpath, "found fileHandle id", index) - return wfs.handles[index] + wfs.handlesLock.Lock() + existingHandle, found := wfs.handles[inodeId] + wfs.handlesLock.Unlock() + if found && existingHandle != nil { + existingHandle.f.isOpen++ + glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen) + return existingHandle } + entry, _ := file.maybeLoadEntry(context.Background()) + file.entry = entry fileHandle = newFileHandle(file, uid, gid) - for i, h := range wfs.handles { - if h == nil { - wfs.handles[i] = fileHandle - fileHandle.handle = uint64(i) - wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle) - return - } - } + file.isOpen++ - wfs.handles = append(wfs.handles, fileHandle) - fileHandle.handle 
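NewSeaweedFileSystem above derives a per-mount cache subdirectory from an MD5 hash of the mount parameters, so two mounts with different filers or root paths never share or clobber each other's local chunk cache. The gist, with hypothetical example values:

package main

import (
	"crypto/md5"
	"fmt"
	"path"
)

// cacheDirFor derives a stable, mount-specific cache directory the same
// way NewSeaweedFileSystem does: hash the identifying parameters and use
// the first 8 hex characters as a subdirectory name.
func cacheDirFor(baseCacheDir, mountDir, filerGrpcAddr, mountRoot, version string) string {
	sum := md5.Sum([]byte(mountDir + filerGrpcAddr + mountRoot + version))
	uniqueId := fmt.Sprintf("%x", sum)[:8]
	return path.Join(baseCacheDir, uniqueId)
}

func main() {
	// all arguments below are illustrative values, not defaults
	fmt.Println(cacheDirFor("/tmp/seaweed-cache", "/mnt/weed", "localhost:18888", "/buckets", "2.x"))
}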
= uint64(len(wfs.handles) - 1) - glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle) - wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) + wfs.handlesLock.Lock() + wfs.handles[inodeId] = fileHandle + wfs.handlesLock.Unlock() + fileHandle.handle = inodeId + glog.V(4).Infof("Acquired new Handle %s open %d", fullpath, file.isOpen) return } -func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() +func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) - delete(wfs.pathToHandleIndex, fullpath) - if int(handleId) < len(wfs.handles) { - wfs.handles[int(handleId)] = nil - } + glog.V(4).Infof("ReleaseHandle %s id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + + delete(wfs.handles, uint64(handleId)) return } @@ -137,16 +183,17 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, Replication: wfs.option.Replication, Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec), + DiskType: string(wfs.option.DiskType), } glog.V(4).Infof("reading filer stats: %+v", request) - resp, err := client.Statistics(ctx, request) + resp, err := client.Statistics(context.Background(), request) if err != nil { glog.V(0).Infof("reading filer stats %v: %v", request, err) return err @@ -191,3 +238,34 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
return nil } + +func (wfs *WFS) mapPbIdFromFilerToLocal(entry *filer_pb.Entry) { + if entry.Attributes == nil { + return + } + entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.FilerToLocal(entry.Attributes.Uid, entry.Attributes.Gid) +} +func (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) { + if entry.Attributes == nil { + return + } + entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid) +} + +func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType { + if wfs.option.VolumeServerAccess == "filerProxy" { + return func(fileId string) (targetUrls []string, err error) { + return []string{"http://" + wfs.option.FilerAddress + "/?proxyChunkId=" + fileId}, nil + } + } + return filer.LookupFn(wfs) + +} + +type NodeWithId uint64 +func (n NodeWithId) Id() uint64 { + return uint64(n) +} +func (n NodeWithId) Attr(ctx context.Context, attr *fuse.Attr) error { + return nil +} diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go deleted file mode 100644 index 6e586b7df..000000000 --- a/weed/filesys/wfs_deletion.go +++ /dev/null @@ -1,69 +0,0 @@ -package filesys - -import ( - "context" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc" -) - -func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { - if len(chunks) == 0 { - return - } - - var fileIds []string - for _, chunk := range chunks { - fileIds = append(fileIds, chunk.GetFileIdString()) - } - - wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) - return nil - }) -} - -func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { - - var vids []string - for _, fileId := range fileIds { - vids = append(vids, filer2.VolumeId(fileId)) - } - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { - - m := make(map[string]operation.LookupResult) - - glog.V(4).Infof("remove file lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return m, err - } - - for _, vid := range vids { - lr := operation.LookupResult{ - VolumeId: vid, - Locations: nil, - } - locations := resp.LocationsMap[vid] - for _, loc := range locations.Locations { - lr.Locations = append(lr.Locations, operation.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - }) - } - m[vid] = lr - } - - return m, err - } - - _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) - - return err -} diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go new file mode 100644 index 000000000..671d20ba2 --- /dev/null +++ b/weed/filesys/wfs_filer_client.go @@ -0,0 +1,34 @@ +package filesys + +import ( + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +var _ = filer_pb.FilerClient(&WFS{}) + +func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + err := util.Retry("filer grpc "+wfs.option.FilerGrpcAddress, func() error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error 
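LookupFn above encodes the new VolumeServerAccess modes: when set to "filerProxy", chunk reads are rewritten into ?proxyChunkId= URLs on the filer instead of direct volume-server URLs, which helps when clients cannot reach volume servers directly. The URL selection in isolation, with stand-in names:

package main

import "fmt"

// lookupURL mirrors WFS.LookupFn's filerProxy branch: route chunk reads
// through the filer when direct volume-server access is not possible.
func lookupURL(access, filerAddress, fileId string, direct func(string) []string) []string {
	if access == "filerProxy" {
		return []string{"http://" + filerAddress + "/?proxyChunkId=" + fileId}
	}
	return direct(fileId) // normal path: resolve actual volume locations
}

func main() {
	urls := lookupURL("filerProxy", "localhost:8888", "3,0135a1f6d2", nil)
	fmt.Println(urls[0]) // http://localhost:8888/?proxyChunkId=3,0135a1f6d2
}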
{ + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + }) + + if err == nil { + return nil + } + return err + +} + +func (wfs *WFS) AdjustedUrl(location *filer_pb.Location) string { + if wfs.option.VolumeServerAccess == "publicUrl" { + return location.PublicUrl + } + return location.Url +} diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go new file mode 100644 index 000000000..dbec3bebc --- /dev/null +++ b/weed/filesys/wfs_write.go @@ -0,0 +1,75 @@ +package filesys + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType { + + return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) { + var fileId, host string + var auth security.EncodedJwt + + if err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: wfs.option.Replication, + Collection: wfs.option.Collection, + TtlSec: wfs.option.TtlSec, + DiskType: string(wfs.option.DiskType), + DataCenter: wfs.option.DataCenter, + Path: string(fullPath), + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth) + loc := &filer_pb.Location{ + Url: resp.Url, + PublicUrl: resp.PublicUrl, + } + host = wfs.AdjustedUrl(loc) + collection, replication = resp.Collection, resp.Replication + + return nil + }); err != nil { + return nil, "", "", fmt.Errorf("filerGrpcAddress assign volume: %v", err) + } + + fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + if wfs.option.VolumeServerAccess == "filerProxy" { + fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.option.FilerAddress, fileId) + } + uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth) + if err != nil { + glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) + return nil, "", "", fmt.Errorf("upload data: %v", err) + } + if uploadResult.Error != "" { + glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err) + return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error) + } + + wfs.chunkCache.SetChunk(fileId, data) + + chunk = uploadResult.ToPbFileChunk(fileId, offset) + return chunk, collection, replication, nil + } +} diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 3c0ba164a..92e43b675 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -2,11 +2,12 @@ package filesys import ( "context" - "path/filepath" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" + + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp 
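saveDataAsChunk above follows the usual SeaweedFS write sequence: ask the filer to assign a file id on a volume server, upload the bytes there, then describe the upload as a chunk to attach to the entry. A compressed outline of those steps; assignAndUpload, chunkRef, and the callbacks are illustrative stand-ins, not the real client API:

package main

import (
	"bytes"
	"fmt"
	"io"
)

type chunkRef struct {
	FileId string
	Offset int64
	Size   int64
}

// assignAndUpload sketches the saveDataAsChunk flow: assign, upload, describe.
func assignAndUpload(assign func() (fileId, host string, err error),
	upload func(host, fileId string, r io.Reader) (int64, error),
	data io.Reader, offset int64) (*chunkRef, error) {
	fileId, host, err := assign()
	if err != nil {
		return nil, fmt.Errorf("assign volume: %v", err)
	}
	n, err := upload(host, fileId, data)
	if err != nil {
		return nil, fmt.Errorf("upload to %s: %v", host, err)
	}
	return &chunkRef{FileId: fileId, Offset: offset, Size: n}, nil
}

func main() {
	chunk, _ := assignAndUpload(
		func() (string, string, error) { return "3,0135a1f6d2", "localhost:8080", nil },
		func(host, fileId string, r io.Reader) (int64, error) { return io.Copy(io.Discard, r) },
		bytes.NewReader([]byte("hello")), 0)
	fmt.Printf("%+v\n", chunk) // &{FileId:3,0135a1f6d2 Offset:0 Size:5}
}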
*fuse.GetxattrResponse) error { @@ -107,36 +108,16 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis } -func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) { +func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) { - fullpath := filepath.Join(dir, name) - item := wfs.listDirectoryEntriesCache.Get(fullpath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) - return - } - glog.V(3).Infof("read entry cache miss %s", fullpath) - - err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Name: name, - Directory: dir, - } + fullpath := util.NewFullPath(dir, name) + // glog.V(3).Infof("read entry cache miss %s", fullpath) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(3).Infof("file attr read file %v: %v", request, err) - return fuse.ENOENT - } - - entry = resp.Entry - if entry != nil { - wfs.listDirectoryEntriesCache.Set(fullpath, entry, wfs.option.EntryCacheTtl) - } - - return nil - }) - - return + // read from async meta cache + meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir)) + cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath) + if cacheErr == filer_pb.ErrNotFound { + return nil, fuse.ENOENT + } + return cachedEntry.ToProtoEntry(), cacheErr } diff --git a/weed/ftpd/ftp_server.go b/weed/ftpd/ftp_server.go new file mode 100644 index 000000000..4a0dca2c3 --- /dev/null +++ b/weed/ftpd/ftp_server.go @@ -0,0 +1,81 @@ +package ftpd + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + + ftpserver "github.com/fclairamb/ftpserverlib" + "google.golang.org/grpc" +) + +type FtpServerOption struct { + Filer string + IP string + IpBind string + Port int + FilerGrpcAddress string + FtpRoot string + GrpcDialOption grpc.DialOption + PassivePortStart int + PassivePortStop int +} + +type SftpServer struct { + option *FtpServerOption + ftpListener net.Listener +} + +var _ = ftpserver.MainDriver(&SftpServer{}) + +// NewFtpServer returns a new FTP server driver +func NewFtpServer(ftpListener net.Listener, option *FtpServerOption) (*SftpServer, error) { + var err error + server := &SftpServer{ + option: option, + ftpListener: ftpListener, + } + return server, err +} + +// GetSettings returns some general settings around the server setup +func (s *SftpServer) GetSettings() (*ftpserver.Settings, error) { + var portRange *ftpserver.PortRange + if s.option.PassivePortStart > 0 && s.option.PassivePortStop > s.option.PassivePortStart { + portRange = &ftpserver.PortRange{ + Start: s.option.PassivePortStart, + End: s.option.PassivePortStop, + } + } + + return &ftpserver.Settings{ + Listener: s.ftpListener, + ListenAddr: fmt.Sprintf("%s:%d", s.option.IpBind, s.option.Port), + PublicHost: s.option.IP, + PassiveTransferPortRange: portRange, + ActiveTransferPortNon20: true, + IdleTimeout: -1, + ConnectionTimeout: 20, + }, nil +} + +// ClientConnected is called to send the very first welcome message +func (s *SftpServer) ClientConnected(cc ftpserver.ClientContext) (string, error) { + return "Welcome to SeaweedFS FTP Server", nil +} + +// ClientDisconnected is called when the user disconnects, even if they never authenticated +func (s *SftpServer) ClientDisconnected(cc ftpserver.ClientContext) { +} + +// AuthUser authenticates the user and selects a handling driver +func (s *SftpServer) AuthUser(cc ftpserver.ClientContext,
username, password string) (ftpserver.ClientDriver, error) { + return nil, nil +} + +// GetTLSConfig returns a TLS Certificate to use +// The certificate could frequently change if we use something like "let's encrypt" +func (s *SftpServer) GetTLSConfig() (*tls.Config, error) { + return nil, errors.New("no TLS certificate configured") +} diff --git a/weed/glog/glog.go b/weed/glog/glog.go index f46632f1c..352a7e185 100644 --- a/weed/glog/glog.go +++ b/weed/glog/glog.go @@ -74,8 +74,8 @@ import ( "bufio" "bytes" "errors" - "flag" "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "io" stdLog "log" "os" @@ -398,7 +398,7 @@ type flushSyncWriter interface { func init() { flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", true, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.verbosity, "v", "log levels [0|1|2|3|4], default to 0") flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") diff --git a/weed/glog/glog_file.go b/weed/glog/glog_file.go index bb8e6902f..3f700d8fc 100644 --- a/weed/glog/glog_file.go +++ b/weed/glog/glog_file.go @@ -20,8 +20,8 @@ package glog import ( "errors" - "flag" "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "os" "os/user" "path/filepath" diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go new file mode 100644 index 000000000..2e5f709f3 --- /dev/null +++ b/weed/iamapi/iamapi_handlers.go @@ -0,0 +1,105 @@ +package iamapi + +import ( + "bytes" + "encoding/xml" + "fmt" + "strconv" + + "net/http" + "net/url" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + + "github.com/aws/aws-sdk-go/service/iam" +) + +type mimeType string + +const ( + mimeNone mimeType = "" + mimeXML mimeType = "application/xml" +) + +func setCommonHeaders(w http.ResponseWriter) { + w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("Accept-Ranges", "bytes") +} + +// Encodes the response headers into XML format. 
+func encodeResponse(response interface{}) []byte { + var bytesBuffer bytes.Buffer + bytesBuffer.WriteString(xml.Header) + e := xml.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// If none of the http routes match respond with MethodNotAllowed +func notFoundHandler(w http.ResponseWriter, r *http.Request) { + glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI) + writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL) +} + +func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) { + apiError := s3err.GetAPIError(errorCode) + errorResponse := getRESTErrorResponse(apiError, reqURL.Path) + encodedErrorResponse := encodeResponse(errorResponse) + writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML) +} + +func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) { + errCode := err.Error() + errorResp := ErrorResponse{} + errorResp.Error.Type = "Sender" + errorResp.Error.Code = &errCode + if msg != nil { + errMsg := msg.Error() + errorResp.Error.Message = &errMsg + } + glog.Errorf("Response %+v", err) + switch errCode { + case iam.ErrCodeNoSuchEntityException: + msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value) + errorResp.Error.Message = &msg + writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML) + case iam.ErrCodeServiceFailureException: + writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML) + default: + writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML) + } +} + +func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse { + return s3err.RESTErrorResponse{ + Code: err.Code, + Message: err.Description, + Resource: resource, + RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), + } +} + +func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { + setCommonHeaders(w) + if response != nil { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + } + if mType != mimeNone { + w.Header().Set("Content-Type", string(mType)) + } + w.WriteHeader(statusCode) + if response != nil { + glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) + _, err := w.Write(response) + if err != nil { + glog.V(0).Infof("write err: %v", err) + } + w.(http.Flusher).Flush() + } +} + +func writeSuccessResponseXML(w http.ResponseWriter, response []byte) { + writeResponse(w, http.StatusOK, response, mimeXML) +} diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go new file mode 100644 index 000000000..b00ada234 --- /dev/null +++ b/weed/iamapi/iamapi_management_handlers.go @@ -0,0 +1,449 @@ +package iamapi + +import ( + "crypto/sha1" + "encoding/json" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "math/rand" + "net/http" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/service/iam" +) + +const ( + charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/" + policyDocumentVersion = "2012-10-17" + StatementActionAdmin = "*" + StatementActionWrite = "Put*" + StatementActionRead = "Get*" + StatementActionList = "List*" + StatementActionTagging = "Tagging*" +) + +var ( + seededRand *rand.Rand = 
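The IAM handlers above wrap every reply in an AWS-style XML envelope: encode the struct, set Content-Type and Content-Length, then write status and body. A minimal standard-library sketch of that response path; errorBody is a simplified stand-in for s3err.RESTErrorResponse:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

type errorBody struct {
	XMLName xml.Name `xml:"ErrorResponse"`
	Code    string   `xml:"Error>Code"`
	Message string   `xml:"Error>Message"`
}

// writeXML mirrors writeResponse above: headers first, then status, then body.
func writeXML(w http.ResponseWriter, status int, v interface{}) {
	var buf bytes.Buffer
	buf.WriteString(xml.Header)
	_ = xml.NewEncoder(&buf).Encode(v) // sketch: error handling elided
	w.Header().Set("Content-Type", "application/xml")
	w.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
	w.WriteHeader(status)
	_, _ = w.Write(buf.Bytes())
}

func main() {
	rec := httptest.NewRecorder()
	writeXML(rec, http.StatusNotFound, errorBody{Code: "NoSuchEntity", Message: "the user with name bob cannot be found"})
	fmt.Println(rec.Code)
	fmt.Println(rec.Body.String())
}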
rand.New( + rand.NewSource(time.Now().UnixNano())) + policyDocuments = map[string]*PolicyDocument{} + policyLock = sync.RWMutex{} +) + +func MapToStatementAction(action string) string { + switch action { + case StatementActionAdmin: + return s3_constants.ACTION_ADMIN + case StatementActionWrite: + return s3_constants.ACTION_WRITE + case StatementActionRead: + return s3_constants.ACTION_READ + case StatementActionList: + return s3_constants.ACTION_LIST + case StatementActionTagging: + return s3_constants.ACTION_TAGGING + default: + return "" + } +} + +func MapToIdentitiesAction(action string) string { + switch action { + case s3_constants.ACTION_ADMIN: + return StatementActionAdmin + case s3_constants.ACTION_WRITE: + return StatementActionWrite + case s3_constants.ACTION_READ: + return StatementActionRead + case s3_constants.ACTION_LIST: + return StatementActionList + case s3_constants.ACTION_TAGGING: + return StatementActionTagging + default: + return "" + } +} + +type Statement struct { + Effect string `json:"Effect"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} + +type Policies struct { + Policies map[string]PolicyDocument `json:"policies"` +} + +type PolicyDocument struct { + Version string `json:"Version"` + Statement []*Statement `json:"Statement"` +} + +func (p PolicyDocument) String() string { + b, _ := json.Marshal(p) + return string(b) +} + +func Hash(s *string) string { + h := sha1.New() + h.Write([]byte(*s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func StringWithCharset(length int, charset string) string { + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +func (iama *IamApiServer) ListUsers(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListUsersResponse) { + for _, ident := range s3cfg.Identities { + resp.ListUsersResult.Users = append(resp.ListUsersResult.Users, &iam.User{UserName: &ident.Name}) + } + return resp +} + +func (iama *IamApiServer) ListAccessKeys(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListAccessKeysResponse) { + status := iam.StatusTypeActive + for _, ident := range s3cfg.Identities { + for _, cred := range ident.Credentials { + resp.ListAccessKeysResult.AccessKeyMetadata = append(resp.ListAccessKeysResult.AccessKeyMetadata, + &iam.AccessKeyMetadata{UserName: &ident.Name, AccessKeyId: &cred.AccessKey, Status: &status}, + ) + } + } + return resp +} + +func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateUserResponse) { + userName := values.Get("UserName") + resp.CreateUserResult.User.UserName = &userName + s3cfg.Identities = append(s3cfg.Identities, &iam_pb.Identity{Name: userName}) + return resp +} + +func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) { + for i, ident := range s3cfg.Identities { + if userName == ident.Name { + s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) 
+ return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) { + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + resp.GetUserResult.User = iam.User{UserName: &ident.Name} + return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) { + if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil { + return PolicyDocument{}, err + } + return policyDocument, err +} + +func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) { + policyName := values.Get("PolicyName") + policyDocumentString := values.Get("PolicyDocument") + policyDocument, err := GetPolicyDocument(&policyDocumentString) + if err != nil { + return CreatePolicyResponse{}, err + } + policyId := Hash(&policyDocumentString) + arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName) + resp.CreatePolicyResult.Policy.PolicyName = &policyName + resp.CreatePolicyResult.Policy.Arn = &arn + resp.CreatePolicyResult.Policy.PolicyId = &policyId + policies := Policies{} + policyLock.Lock() + defer policyLock.Unlock() + if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil { + return resp, err + } + policies.Policies[policyName] = policyDocument + if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil { + return resp, err + } + return resp, nil +} + +func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { + userName := values.Get("UserName") + policyName := values.Get("PolicyName") + policyDocumentString := values.Get("PolicyDocument") + policyDocument, err := GetPolicyDocument(&policyDocumentString) + if err != nil { + return PutUserPolicyResponse{}, err + } + policyDocuments[policyName] = &policyDocument + actions := GetActions(&policyDocument) + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + for _, action := range actions { + ident.Actions = append(ident.Actions, action) + } + break + } + } + return resp, nil +} + +func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) { + userName := values.Get("UserName") + policyName := values.Get("PolicyName") + for _, ident := range s3cfg.Identities { + if userName != ident.Name { + continue + } + + resp.GetUserPolicyResult.UserName = userName + resp.GetUserPolicyResult.PolicyName = policyName + if len(ident.Actions) == 0 { + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) + } + + policyDocument := PolicyDocument{Version: policyDocumentVersion} + statements := make(map[string][]string) + for _, action := range ident.Actions { + // parse "Read:EXAMPLE-BUCKET" + act := strings.Split(action, ":") + + resource := "*" + if len(act) == 2 { + resource = fmt.Sprintf("arn:aws:s3:::%s/*", act[1]) + } + statements[resource] = append(statements[resource], + fmt.Sprintf("s3:%s", MapToIdentitiesAction(act[0])), + ) + } + for resource, actions := range statements { + isEqAction := false + for i, statement := range policyDocument.Statement { + if reflect.DeepEqual(statement.Action, actions) { + policyDocument.Statement[i].Resource = append( + policyDocument.Statement[i].Resource, resource) + isEqAction = true + break + } + } + if isEqAction { + 
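+				// actions identical to an existing statement: the resource was
+				// merged into that statement above, so skip creating a new one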
continue + } + policyDocumentStatement := Statement{ + Effect: "Allow", + Action: actions, + } + policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource) + policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement) + } + resp.GetUserPolicyResult.PolicyDocument = policyDocument.String() + return resp, nil + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { + userName := values.Get("UserName") + for i, ident := range s3cfg.Identities { + if ident.Name == userName { + s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) + return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func GetActions(policy *PolicyDocument) (actions []string) { + for _, statement := range policy.Statement { + if statement.Effect != "Allow" { + continue + } + for _, resource := range statement.Resource { + // Parse "arn:aws:s3:::my-bucket/shared/*" + res := strings.Split(resource, ":") + if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" { + glog.Infof("not match resource: %s", res) + continue + } + for _, action := range statement.Action { + // Parse "s3:Get*" + act := strings.Split(action, ":") + if len(act) != 2 || act[0] != "s3" { + glog.Infof("not match action: %s", act) + continue + } + statementAction := MapToStatementAction(act[1]) + if res[5] == "*" { + actions = append(actions, statementAction) + continue + } + // Parse my-bucket/shared/* + path := strings.Split(res[5], "/") + if len(path) != 2 || path[1] != "*" { + glog.Infof("not match bucket: %s", path) + continue + } + actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0])) + } + } + } + return actions +} + +func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) { + userName := values.Get("UserName") + status := iam.StatusTypeActive + accessKeyId := StringWithCharset(21, charsetUpper) + secretAccessKey := StringWithCharset(42, charset) + resp.CreateAccessKeyResult.AccessKey.AccessKeyId = &accessKeyId + resp.CreateAccessKeyResult.AccessKey.SecretAccessKey = &secretAccessKey + resp.CreateAccessKeyResult.AccessKey.UserName = &userName + resp.CreateAccessKeyResult.AccessKey.Status = &status + changed := false + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + ident.Credentials = append(ident.Credentials, + &iam_pb.Credential{AccessKey: accessKeyId, SecretKey: secretAccessKey}) + changed = true + break + } + } + if !changed { + s3cfg.Identities = append(s3cfg.Identities, + &iam_pb.Identity{Name: userName, + Credentials: []*iam_pb.Credential{ + { + AccessKey: accessKeyId, + SecretKey: secretAccessKey, + }, + }, + }, + ) + } + return resp +} + +func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp DeleteAccessKeyResponse) { + userName := values.Get("UserName") + accessKeyId := values.Get("AccessKeyId") + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + for i, cred := range ident.Credentials { + if cred.AccessKey == accessKeyId { + ident.Credentials = append(ident.Credentials[:i], ident.Credentials[i+1:]...) 
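+					// only the first credential matching the access key id is
+					// removed; DoActions writes the updated config back to the filer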
+ break + } + } + break + } + } + return resp +} + +func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL) + return + } + values := r.PostForm + var s3cfgLock sync.RWMutex + s3cfgLock.RLock() + s3cfg := &iam_pb.S3ApiConfiguration{} + if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + s3cfgLock.RUnlock() + + glog.V(4).Infof("DoActions: %+v", values) + var response interface{} + var err error + changed := true + switch r.Form.Get("Action") { + case "ListUsers": + response = iama.ListUsers(s3cfg, values) + changed = false + case "ListAccessKeys": + response = iama.ListAccessKeys(s3cfg, values) + changed = false + case "CreateUser": + response = iama.CreateUser(s3cfg, values) + case "GetUser": + userName := values.Get("UserName") + response, err = iama.GetUser(s3cfg, userName) + if err != nil { + writeIamErrorResponse(w, err, "user", userName, nil) + return + } + changed = false + case "DeleteUser": + userName := values.Get("UserName") + response, err = iama.DeleteUser(s3cfg, userName) + if err != nil { + writeIamErrorResponse(w, err, "user", userName, nil) + return + } + case "CreateAccessKey": + response = iama.CreateAccessKey(s3cfg, values) + case "DeleteAccessKey": + response = iama.DeleteAccessKey(s3cfg, values) + case "CreatePolicy": + response, err = iama.CreatePolicy(s3cfg, values) + if err != nil { + glog.Errorf("CreatePolicy: %+v", err) + writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL) + return + } + case "PutUserPolicy": + response, err = iama.PutUserPolicy(s3cfg, values) + if err != nil { + glog.Errorf("PutUserPolicy: %+v", err) + writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL) + return + } + case "GetUserPolicy": + response, err = iama.GetUserPolicy(s3cfg, values) + if err != nil { + writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil) + return + } + changed = false + case "DeleteUserPolicy": + if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil { + writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil) + } + default: + errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented) + errorResponse := ErrorResponse{} + errorResponse.Error.Code = &errNotImplemented.Code + errorResponse.Error.Message = &errNotImplemented.Description + writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML) + return + } + if changed { + s3cfgLock.Lock() + err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg) + s3cfgLock.Unlock() + if err != nil { + writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err) + return + } + } + writeSuccessResponseXML(w, encodeResponse(response)) +} diff --git a/weed/iamapi/iamapi_response.go b/weed/iamapi/iamapi_response.go new file mode 100644 index 000000000..77328b608 --- /dev/null +++ b/weed/iamapi/iamapi_response.go @@ -0,0 +1,103 @@ +package iamapi + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/service/iam" +) + +type CommonResponse struct { + ResponseMetadata struct { + RequestId string `xml:"RequestId"` + } `xml:"ResponseMetadata"` +} + +type ListUsersResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListUsersResponse"` + ListUsersResult struct { + Users []*iam.User `xml:"Users>member"` + IsTruncated bool `xml:"IsTruncated"` + } `xml:"ListUsersResult"` +} + +type 
ListAccessKeysResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListAccessKeysResponse"` + ListAccessKeysResult struct { + AccessKeyMetadata []*iam.AccessKeyMetadata `xml:"AccessKeyMetadata>member"` + IsTruncated bool `xml:"IsTruncated"` + } `xml:"ListAccessKeysResult"` +} + +type DeleteAccessKeyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteAccessKeyResponse"` +} + +type CreatePolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreatePolicyResponse"` + CreatePolicyResult struct { + Policy iam.Policy `xml:"Policy"` + } `xml:"CreatePolicyResult"` +} + +type CreateUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateUserResponse"` + CreateUserResult struct { + User iam.User `xml:"User"` + } `xml:"CreateUserResult"` +} + +type DeleteUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"` +} + +type GetUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserResponse"` + GetUserResult struct { + User iam.User `xml:"User"` + } `xml:"GetUserResult"` +} + +type CreateAccessKeyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateAccessKeyResponse"` + CreateAccessKeyResult struct { + AccessKey iam.AccessKey `xml:"AccessKey"` + } `xml:"CreateAccessKeyResult"` +} + +type PutUserPolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ PutUserPolicyResponse"` +} + +type GetUserPolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserPolicyResponse"` + GetUserPolicyResult struct { + UserName string `xml:"UserName"` + PolicyName string `xml:"PolicyName"` + PolicyDocument string `xml:"PolicyDocument"` + } `xml:"GetUserPolicyResult"` +} + +type ErrorResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ErrorResponse"` + Error struct { + iam.ErrorDetails + Type string `xml:"Type"` + } `xml:"Error"` +} + +func (r *CommonResponse) SetRequestId() { + r.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano()) +} diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go new file mode 100644 index 000000000..18af1a919 --- /dev/null +++ b/weed/iamapi/iamapi_server.go @@ -0,0 +1,149 @@ +package iamapi + +// https://docs.aws.amazon.com/cli/latest/reference/iam/list-roles.html + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api" + . 
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "net/http" + "strings" +) + +type IamS3ApiConfig interface { + GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) + PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) + GetPolicies(policies *Policies) (err error) + PutPolicies(policies *Policies) (err error) +} + +type IamS3ApiConfigure struct { + option *IamServerOption + masterClient *wdclient.MasterClient +} + +type IamServerOption struct { + Masters string + Filer string + Port int + FilerGrpcAddress string + GrpcDialOption grpc.DialOption +} + +type IamApiServer struct { + s3ApiConfig IamS3ApiConfig + iam *s3api.IdentityAccessManagement +} + +var s3ApiConfigure IamS3ApiConfig + +func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) { + s3ApiConfigure = IamS3ApiConfigure{ + option: option, + masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(option.Masters, ",")), + } + s3Option := s3api.S3ApiServerOption{Filer: option.Filer} + iamApiServer = &IamApiServer{ + s3ApiConfig: s3ApiConfigure, + iam: s3api.NewIdentityAccessManagement(&s3Option), + } + + iamApiServer.registerRouter(router) + + return iamApiServer, nil +} + +func (iama *IamApiServer) registerRouter(router *mux.Router) { + // API Router + apiRouter := router.PathPrefix("/").Subrouter() + // ListBuckets + + // apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST")) + apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN)) + // + // NotFound + apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) +} + +func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + var buf bytes.Buffer + err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if buf.Len() > 0 { + if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil { + return err + } + } + return nil +} + +func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + buf := bytes.Buffer{} + if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil { + return fmt.Errorf("S3ConfigurationToText: %s", err) + } + return pb.WithGrpcFilerClient( + iam.option.FilerGrpcAddress, + iam.option.GrpcDialOption, + func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()); err != nil { + return err + } + return nil + }, + ) +} + +func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { + var buf bytes.Buffer + err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if buf.Len() == 0 { + policies.Policies = make(map[string]PolicyDocument) + return nil + } + if err := json.Unmarshal(buf.Bytes(), 
policies); err != nil { + return err + } + return nil +} + +func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) { + var b []byte + if b, err = json.Marshal(policies); err != nil { + return err + } + return pb.WithGrpcFilerClient( + iam.option.FilerGrpcAddress, + iam.option.GrpcDialOption, + func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil { + return err + } + return nil + }, + ) +} diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go new file mode 100644 index 000000000..09aaf0ac8 --- /dev/null +++ b/weed/iamapi/iamapi_test.go @@ -0,0 +1,181 @@ +package iamapi + +import ( + "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/gorilla/mux" + "github.com/jinzhu/copier" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "testing" +) + +var GetS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error) +var PutS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error) +var GetPolicies func(policies *Policies) (err error) +var PutPolicies func(policies *Policies) (err error) + +var s3config = iam_pb.S3ApiConfiguration{} +var policiesFile = Policies{Policies: make(map[string]PolicyDocument)} +var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}} + +type iamS3ApiConfigureMock struct{} + +func (iam iamS3ApiConfigureMock) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + _ = copier.Copy(&s3cfg.Identities, &s3config.Identities) + return nil +} + +func (iam iamS3ApiConfigureMock) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + _ = copier.Copy(&s3config.Identities, &s3cfg.Identities) + return nil +} + +func (iam iamS3ApiConfigureMock) GetPolicies(policies *Policies) (err error) { + _ = copier.Copy(&policies, &policiesFile) + return nil +} + +func (iam iamS3ApiConfigureMock) PutPolicies(policies *Policies) (err error) { + _ = copier.Copy(&policiesFile, &policies) + return nil +} + +func TestCreateUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.CreateUserInput{UserName: userName} + req, _ := iam.New(session.New()).CreateUserRequest(params) + _ = req.Build() + out := CreateUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) + //assert.Equal(t, out.XMLName, "lol") +} + +func TestListUsers(t *testing.T) { + params := &iam.ListUsersInput{} + req, _ := iam.New(session.New()).ListUsersRequest(params) + _ = req.Build() + out := ListUsersResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestListAccessKeys(t *testing.T) { + svc := iam.New(session.New()) + params := &iam.ListAccessKeysInput{} + req, _ := svc.ListAccessKeysRequest(params) + _ = req.Build() + out := ListAccessKeysResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestGetUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.GetUserInput{UserName: userName} + req, _ := iam.New(session.New()).GetUserRequest(params) + _ = req.Build() + out := GetUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, 
err) + assert.Equal(t, http.StatusOK, response.Code) +} + +// Todo flat statement +func TestCreatePolicy(t *testing.T) { + params := &iam.CreatePolicyInput{ + PolicyName: aws.String("S3-read-only-example-bucket"), + PolicyDocument: aws.String(` + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::EXAMPLE-BUCKET", + "arn:aws:s3:::EXAMPLE-BUCKET/*" + ] + } + ] + }`), + } + req, _ := iam.New(session.New()).CreatePolicyRequest(params) + _ = req.Build() + out := CreatePolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestPutUserPolicy(t *testing.T) { + userName := aws.String("Test") + params := &iam.PutUserPolicyInput{ + UserName: userName, + PolicyName: aws.String("S3-read-only-example-bucket"), + PolicyDocument: aws.String( + `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::EXAMPLE-BUCKET", + "arn:aws:s3:::EXAMPLE-BUCKET/*" + ] + } + ] + }`), + } + req, _ := iam.New(session.New()).PutUserPolicyRequest(params) + _ = req.Build() + out := PutUserPolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestGetUserPolicy(t *testing.T) { + userName := aws.String("Test") + params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")} + req, _ := iam.New(session.New()).GetUserPolicyRequest(params) + _ = req.Build() + out := GetUserPolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestDeleteUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.DeleteUserInput{UserName: userName} + req, _ := iam.New(session.New()).DeleteUserRequest(params) + _ = req.Build() + out := DeleteUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) { + rr := httptest.NewRecorder() + apiRouter := mux.NewRouter().SkipClean(true) + apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions) + apiRouter.ServeHTTP(rr, req) + return rr, xml.Unmarshal(rr.Body.Bytes(), &v) +} diff --git a/weed/images/orientation.go b/weed/images/orientation.go index 4bff89311..a592a7d8b 100644 --- a/weed/images/orientation.go +++ b/weed/images/orientation.go @@ -7,7 +7,7 @@ import ( "image/jpeg" "log" - "github.com/rwcarlsen/goexif/exif" + "github.com/seaweedfs/goexif/exif" ) //many code is copied from http://camlistore.org/pkg/images/images.go diff --git a/weed/images/resizing.go b/weed/images/resizing.go index ff0eff5e1..b048daa1c 100644 --- a/weed/images/resizing.go +++ b/weed/images/resizing.go @@ -6,10 +6,11 @@ import ( "image/gif" "image/jpeg" "image/png" + "io" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/disintegration/imaging" - "io" + + "github.com/chrislusf/seaweedfs/weed/glog" ) func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) { @@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re } } } else { + read.Seek(0, 0) return read, bounds.Dx(), bounds.Dy() } var 
buf bytes.Buffer diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go new file mode 100644 index 000000000..8e5b56fd0 --- /dev/null +++ b/weed/messaging/broker/broker_append.go @@ -0,0 +1,113 @@ +package broker + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error { + + assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data) + if err2 != nil { + return err2 + } + + dir, name := util.FullPath(targetFile).DirAndName() + + // append the chunk + if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AppendToEntryRequest{ + Directory: dir, + EntryName: name, + Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)}, + } + + _, err := client.AppendToEntry(context.Background(), request) + if err != nil { + glog.V(0).Infof("append to file %v: %v", request, err) + return err + } + + return nil + }); err != nil { + return fmt.Errorf("append to file %v: %v", targetFile, err) + } + + return nil +} + +func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) { + + var assignResult = &operation.AssignResult{} + + // assign a volume location + if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: topicConfig.Replication, + Collection: topicConfig.Collection, + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + assignResult.Auth = security.EncodedJwt(resp.Auth) + assignResult.Fid = resp.FileId + assignResult.Url = resp.Url + assignResult.PublicUrl = resp.PublicUrl + assignResult.Count = uint64(resp.Count) + + return nil + }); err != nil { + return nil, nil, err + } + + // upload data + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth) + if err != nil { + return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) + } + // println("uploaded to", targetUrl) + return assignResult, uploadResult, nil +} + +var _ = filer_pb.FilerClient(&MessageBroker{}) + +func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) { + + for _, filer := range broker.option.Filers { + if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil { + if err == io.EOF { + return + } + glog.V(0).Infof("fail to connect to %s: %v", filer, err) + } else { + break + } + } + + return + +} + +func (broker *MessageBroker) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go new file mode 100644 index 
000000000..ba141fdd0
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server.go
@@ -0,0 +1,37 @@
+package broker
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
+	panic("implement me")
+}
+
+func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) {
+	resp := &messaging_pb.DeleteTopicResponse{}
+	dir, entry := genTopicDirEntry(request.Namespace, request.Topic)
+	if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil {
+		return nil, err
+	} else if exists {
+		// propagate the removal error instead of silently dropping it
+		return resp, filer_pb.Remove(broker, dir, entry, true, true, true, false, nil)
+	}
+	return resp, nil
+}
+
+func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
+	panic("implement me")
+}
+
+func genTopicDir(namespace, topic string) string {
+	return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, namespace, topic)
+}
+
+func genTopicDirEntry(namespace, topic string) (dir, entry string) {
+	return fmt.Sprintf("%s/%s", filer.TopicsDir, namespace), topic
+}
diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go
new file mode 100644
index 000000000..3c14f3220
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_discovery.go
@@ -0,0 +1,116 @@
+package broker
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+/*
+Topic discovery:
+
+When a pub or sub connects, it asks for the whole broker list and runs consistent hashing to find its broker.
+
+The broker checks with its peers whether the topic is already hosted by some other broker; if that broker is alive and acknowledged as alive, redirect to it.
+Otherwise, just host the topic.
+
+So, if the pub and sub connect around the same time, they would connect to the same broker. Everyone is happy.
+If one of them connects very late, and the system topology has changed quite a bit with new servers added or old servers dead, checking peers will help.
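+
+A rough client-side sketch (the broker address and channel name below are made
+up for illustration): a publisher and a subscriber using the msgclient package
+hash the same topic partition, e.g. "chan/metrics_00", to the same broker, so
+both ends of the channel meet on one broker:
+
+	mc := msgclient.NewMessagingClient("localhost:17777")
+	pub, _ := mc.NewPubChannel("metrics")         // resolves the broker hosting chan/metrics_00
+	sub, _ := mc.NewSubChannel("sub1", "metrics") // hashes to the same broker
+	_ = pub.Publish([]byte("hello"))
+	fmt.Printf("%s\n", <-sub.Channel())           // prints "hello"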
+ +*/ + +func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) { + + t := &messaging_pb.FindBrokerResponse{} + var peers []string + + targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition) + + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{ + Resource: targetTopicPartition, + }) + if err != nil { + return err + } + if resp.Found && len(resp.Resources) > 0 { + t.Broker = resp.Resources[0].GrpcAddresses + return nil + } + for _, b := range resp.Resources { + peers = append(peers, b.GrpcAddresses) + } + return nil + }) + if err != nil { + return nil, err + } + } + + t.Broker = PickMember(peers, []byte(targetTopicPartition)) + + return t, nil + +} + +func (broker *MessageBroker) checkFilers() { + + // contact a filer about masters + var masters []string + found := false + for !found { + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) + return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received master list: %s", masters) + + // contact each masters for filers + var filers []string + found = false + for !found { + for _, master := range masters { + err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error { + resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{ + ClientType: "filer", + }) + if err != nil { + return err + } + + filers = append(filers, resp.GrpcAddresses...) 
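+					// accumulate the grpc addresses of every filer currently
+					// registered with this master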
+ + return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to list filers: %v", err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received filer list: %s", filers) + + broker.option.Filers = filers + +} diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go new file mode 100644 index 000000000..6e6b723d1 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_publish.go @@ -0,0 +1,112 @@ +package broker + +import ( + "crypto/md5" + "fmt" + "io" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // send init response + initResponse := &messaging_pb.PublishResponse{ + Config: nil, + Redirect: nil, + } + err = stream.Send(initResponse) + if err != nil { + return err + } + if initResponse.Redirect != nil { + return nil + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + + tpDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, tp.Namespace, tp.Topic) + md5File := fmt.Sprintf("p%02d.md5", tp.Partition) + // println("chan data stored under", tpDir, "as", md5File) + + if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists { + return fmt.Errorf("channel is already closed") + } + + tl := broker.topicManager.RequestLock(tp, topicConfig, true) + defer broker.topicManager.ReleaseLock(tp, true) + + md5hash := md5.New() + // process each message + for { + // println("recv") + in, err := stream.Recv() + // glog.V(0).Infof("recieved %v err: %v", in, err) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + if in.Data == nil { + continue + } + + // fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value)) + + data, err := proto.Marshal(in.Data) + if err != nil { + glog.Errorf("marshall error: %v\n", err) + continue + } + + tl.logBuffer.AddToBuffer(in.Data.Key, data, in.Data.EventTimeNs) + + if in.Data.IsClose { + // println("server received closing") + break + } + + md5hash.Write(in.Data.Value) + + } + + if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil { + glog.V(0).Infof("err writing %s: %v", md5File, err) + } + + // fmt.Printf("received md5 %X\n", md5hash.Sum(nil)) + + // send the close ack + // println("server send ack closing") + if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil { + glog.V(0).Infof("err sending close response: %v", err) + } + return nil + +} diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go new file mode 100644 index 000000000..3021473e5 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_subscribe.go @@ -0,0 +1,177 @@ +package broker + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" + "io" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + 
"github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var processedTsNs int64 + var messageCount int64 + subscriberId := in.Init.SubscriberId + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String()) + defer func() { + fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs)) + }() + + lock := broker.topicManager.RequestLock(tp, topicConfig, false) + defer broker.topicManager.ReleaseLock(tp, false) + + isConnected := true + go func() { + for isConnected { + if _, err := stream.Recv(); err != nil { + // println("disconnecting connection to", subscriberId, tp.String()) + isConnected = false + lock.cond.Signal() + } + } + }() + + lastReadTime := time.Now() + switch in.Init.StartPosition { + case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP: + lastReadTime = time.Unix(0, in.Init.TimestampNs) + case messaging_pb.SubscriberMessage_InitMessage_LATEST: + case messaging_pb.SubscriberMessage_InitMessage_EARLIEST: + lastReadTime = time.Unix(0, 0) + } + + // how to process each message + // an error returned will end the subscription + eachMessageFn := func(m *messaging_pb.Message) error { + err := stream.Send(&messaging_pb.BrokerMessage{ + Data: m, + }) + if err != nil { + glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err) + } + return err + } + + eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error { + m := &messaging_pb.Message{} + if err = proto.Unmarshal(logEntry.Data, m); err != nil { + glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err) + return err + } + // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs) + if err = eachMessageFn(m); err != nil { + glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err) + return err + } + if m.IsClose { + // println("processed EOF") + return io.EOF + } + processedTsNs = logEntry.TsNs + messageCount++ + return nil + } + + // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime) + + for { + + if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil { + if err != io.EOF { + // println("stopping from persisted logs", err.Error()) + return err + } + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + lastReadTime, err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool { + lock.Mutex.Lock() + lock.cond.Wait() + lock.Mutex.Unlock() + return isConnected + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + time.Sleep(3127 * time.Millisecond) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) { + startTime = startTime.UTC() + startDate := 
fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute()) + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + topicDir := genTopicDir(tp.Namespace, tp.Topic) + partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition) + + return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error { + dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name) + return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error { + if dayEntry.Name == startDate { + if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 { + return nil + } + } + if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) { + return nil + } + // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name) + chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks) + defer chunkedFileReader.Close() + if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + return err + } + return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err) + } + return nil + }, "", false, 24*60) + }, startDate, true, 366) + +} diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go new file mode 100644 index 000000000..06162471c --- /dev/null +++ b/weed/messaging/broker/broker_server.go @@ -0,0 +1,114 @@ +package broker + +import ( + "context" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +type MessageBrokerOption struct { + Filers []string + DefaultReplication string + MaxMB int + Ip string + Port int + Cipher bool +} + +type MessageBroker struct { + option *MessageBrokerOption + grpcDialOption grpc.DialOption + topicManager *TopicManager +} + +func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) { + + messageBroker = &MessageBroker{ + option: option, + grpcDialOption: grpcDialOption, + } + + messageBroker.topicManager = NewTopicManager(messageBroker) + + messageBroker.checkFilers() + + go messageBroker.keepConnectedToOneFiler() + + return messageBroker, nil +} + +func (broker *MessageBroker) keepConnectedToOneFiler() { + + for { + for _, filer := range broker.option.Filers { + broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.KeepConnected(ctx) + if err != nil { + glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + initRequest := &filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + } + for _, tp := range broker.topicManager.ListTopicPartitions() { + initRequest.Resources = append(initRequest.Resources, tp.String()) + } + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + // TODO send events of adding/removing topics + + 
glog.V(0).Infof("conntected with filer: %v", filer) + for { + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("send heartbeat") + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("received reply") + time.Sleep(11 * time.Second) + // println("woke up") + } + return nil + }) + time.Sleep(3 * time.Second) + } + } + +} + +func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(filer, broker.grpcDialOption, fn) + +} + +func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { + + return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) + +} diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go new file mode 100644 index 000000000..465a2a8f2 --- /dev/null +++ b/weed/messaging/broker/consistent_distribution.go @@ -0,0 +1,38 @@ +package broker + +import ( + "github.com/buraksezer/consistent" + "github.com/cespare/xxhash" +) + +type Member string + +func (m Member) String() string { + return string(m) +} + +type hasher struct{} + +func (h hasher) Sum64(data []byte) uint64 { + return xxhash.Sum64(data) +} + +func PickMember(members []string, key []byte) string { + cfg := consistent.Config{ + PartitionCount: 9791, + ReplicationFactor: 2, + Load: 1.25, + Hasher: hasher{}, + } + + cmembers := []consistent.Member{} + for _, m := range members { + cmembers = append(cmembers, Member(m)) + } + + c := consistent.New(cmembers, cfg) + + m := c.LocateKey(key) + + return m.String() +} diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go new file mode 100644 index 000000000..f58fe4e0e --- /dev/null +++ b/weed/messaging/broker/consistent_distribution_test.go @@ -0,0 +1,32 @@ +package broker + +import ( + "fmt" + "testing" +) + +func TestPickMember(t *testing.T) { + + servers := []string{ + "s1:port", + "s2:port", + "s3:port", + "s5:port", + "s4:port", + } + + total := 1000 + + distribution := make(map[string]int) + for i := 0; i < total; i++ { + tp := fmt.Sprintf("tp:%2d", i) + m := PickMember(servers, []byte(tp)) + // println(tp, "=>", m) + distribution[m]++ + } + + for member, count := range distribution { + fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers))) + } + +} diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go new file mode 100644 index 000000000..edddca813 --- /dev/null +++ b/weed/messaging/broker/topic_manager.go @@ -0,0 +1,124 @@ +package broker + +import ( + "fmt" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type TopicPartition struct { + Namespace string + Topic string + Partition int32 +} + +const ( + TopicPartitionFmt = "%s/%s_%02d" +) + +func (tp *TopicPartition) String() string { + return fmt.Sprintf(TopicPartitionFmt, 
tp.Namespace, tp.Topic, tp.Partition) +} + +type TopicControl struct { + sync.Mutex + cond *sync.Cond + subscriberCount int + publisherCount int + logBuffer *log_buffer.LogBuffer +} + +type TopicManager struct { + sync.Mutex + topicControls map[TopicPartition]*TopicControl + broker *MessageBroker +} + +func NewTopicManager(messageBroker *MessageBroker) *TopicManager { + return &TopicManager{ + topicControls: make(map[TopicPartition]*TopicControl), + broker: messageBroker, + } +} + +func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer { + + flushFn := func(startTime, stopTime time.Time, buf []byte) { + + if topicConfig.IsTransient { + // return + } + + // fmt.Printf("flushing with topic config %+v\n", topicConfig) + + startTime, stopTime = startTime.UTC(), stopTime.UTC() + targetFile := fmt.Sprintf( + "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d", + filer.TopicsDir, tp.Namespace, tp.Topic, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), + tp.Partition, + ) + + if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil { + glog.V(0).Infof("log write failed %s: %v", targetFile, err) + } + } + logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() { + tl.cond.Broadcast() + }) + + return logBuffer +} + +func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl { + tm.Lock() + defer tm.Unlock() + + tc, found := tm.topicControls[partition] + if !found { + tc = &TopicControl{} + tc.cond = sync.NewCond(&tc.Mutex) + tm.topicControls[partition] = tc + tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig) + } + if isPublisher { + tc.publisherCount++ + } else { + tc.subscriberCount++ + } + return tc +} + +func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) { + tm.Lock() + defer tm.Unlock() + + lock, found := tm.topicControls[partition] + if !found { + return + } + if isPublisher { + lock.publisherCount-- + } else { + lock.subscriberCount-- + } + if lock.subscriberCount <= 0 && lock.publisherCount <= 0 { + delete(tm.topicControls, partition) + lock.logBuffer.Shutdown() + } +} + +func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) { + tm.Lock() + defer tm.Unlock() + + for k := range tm.topicControls { + tps = append(tps, k) + } + return +} diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go new file mode 100644 index 000000000..a75678815 --- /dev/null +++ b/weed/messaging/msgclient/chan_config.go @@ -0,0 +1,5 @@ +package msgclient + +func (mc *MessagingClient) DeleteChannel(chanName string) error { + return mc.DeleteTopic("chan", chanName) +} diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go new file mode 100644 index 000000000..9bc88f7c0 --- /dev/null +++ b/weed/messaging/msgclient/chan_pub.go @@ -0,0 +1,76 @@ +package msgclient + +import ( + "crypto/md5" + "hash" + "io" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type PubChannel struct { + client messaging_pb.SeaweedMessaging_PublishClient + grpcConnection *grpc.ClientConn + md5hash hash.Hash +} + +func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + 
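+	// pub/sub channels live in the reserved "chan" namespace with a single
+	// partition; findBroker locates the hosting broker via consistent hashing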
grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + pc, err := setupPublisherClient(grpcConnection, tp) + if err != nil { + return nil, err + } + return &PubChannel{ + client: pc, + grpcConnection: grpcConnection, + md5hash: md5.New(), + }, nil +} + +func (pc *PubChannel) Publish(m []byte) error { + err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + Value: m, + }, + }) + if err == nil { + pc.md5hash.Write(m) + } + return err +} +func (pc *PubChannel) Close() error { + + // println("send closing") + if err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + IsClose: true, + }, + }); err != nil { + log.Printf("err send close: %v", err) + } + // println("receive closing") + if _, err := pc.client.Recv(); err != nil && err != io.EOF { + log.Printf("err receive close: %v", err) + } + // println("close connection") + if err := pc.grpcConnection.Close(); err != nil { + log.Printf("err connection close: %v", err) + } + return nil +} + +func (pc *PubChannel) Md5() []byte { + return pc.md5hash.Sum(nil) +} diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go new file mode 100644 index 000000000..213ff4666 --- /dev/null +++ b/weed/messaging/msgclient/chan_sub.go @@ -0,0 +1,85 @@ +package msgclient + +import ( + "context" + "crypto/md5" + "hash" + "io" + "log" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type SubChannel struct { + ch chan []byte + stream messaging_pb.SeaweedMessaging_SubscribeClient + md5hash hash.Hash + cancel context.CancelFunc +} + +func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + sc, err := setupSubscriberClient(ctx, grpcConnection, tp, subscriberId, time.Unix(0, 0)) + if err != nil { + return nil, err + } + + t := &SubChannel{ + ch: make(chan []byte), + stream: sc, + md5hash: md5.New(), + cancel: cancel, + } + + go func() { + for { + resp, subErr := t.stream.Recv() + if subErr == io.EOF { + return + } + if subErr != nil { + log.Printf("fail to receive from netchan %s: %v", chanName, subErr) + return + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + if resp.Data.IsClose { + t.stream.Send(&messaging_pb.SubscriberMessage{ + IsClose: true, + }) + close(t.ch) + cancel() + return + } + t.ch <- resp.Data.Value + t.md5hash.Write(resp.Data.Value) + } + }() + + return t, nil +} + +func (sc *SubChannel) Channel() chan []byte { + return sc.ch +} + +func (sc *SubChannel) Md5() []byte { + return sc.md5hash.Sum(nil) +} + +func (sc *SubChannel) Cancel() { + sc.cancel() +} diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go new file mode 100644 index 000000000..4d7ef2b8e --- /dev/null +++ b/weed/messaging/msgclient/client.go @@ -0,0 +1,55 @@ +package msgclient + +import ( + "context" + "fmt" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MessagingClient struct { + bootstrapBrokers []string + 
grpcConnections map[broker.TopicPartition]*grpc.ClientConn + grpcDialOption grpc.DialOption +} + +func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient { + return &MessagingClient{ + bootstrapBrokers: bootstrapBrokers, + grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"), + } +} + +func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) { + + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(), + &messaging_pb.FindBrokerRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Parition: tp.Partition, + }) + if err != nil { + return nil, err + } + + targetBroker := resp.Broker + return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption) + } + return nil, fmt.Errorf("no broker found for %+v", tp) +} diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go new file mode 100644 index 000000000..2b9eba1a8 --- /dev/null +++ b/weed/messaging/msgclient/config.go @@ -0,0 +1,63 @@ +package msgclient + +import ( + "context" + "log" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.ConfigureTopic(context.Background(), + &messaging_pb.ConfigureTopicRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Configuration: &messaging_pb.TopicConfiguration{ + PartitionCount: 0, + Collection: "", + Replication: "", + IsTransient: false, + Partitoning: 0, + }, + }) + return err + }) + +} + +func (mc *MessagingClient) DeleteTopic(namespace, topic string) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.DeleteTopic(context.Background(), + &messaging_pb.DeleteTopicRequest{ + Namespace: namespace, + Topic: topic, + }) + return err + }) +} + +func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + var lastErr error + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection)) + if err == nil { + return nil + } + lastErr = err + } + + return lastErr +} diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go new file mode 100644 index 000000000..1aa483ff8 --- /dev/null +++ b/weed/messaging/msgclient/publisher.go @@ -0,0 +1,118 @@ +package msgclient + +import ( + "context" + + "github.com/OneOfOne/xxhash" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type Publisher struct { + publishClients []messaging_pb.SeaweedMessaging_PublishClient + topicConfiguration *messaging_pb.TopicConfiguration + messageCount uint64 + publisherId string +} + +func (mc 
*MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount) + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + client, err := setupPublisherClient(grpcClientConn, tp) + if err != nil { + return nil, err + } + publishClients[i] = client + } + return &Publisher{ + publishClients: publishClients, + topicConfiguration: topicConfiguration, + }, nil +} + +func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) { + + stream, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background()) + if err != nil { + return nil, err + } + + // send init message + err = stream.Send(&messaging_pb.PublishRequest{ + Init: &messaging_pb.PublishRequest_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + }, + }) + if err != nil { + return nil, err + } + + // process init response + initResponse, err := stream.Recv() + if err != nil { + return nil, err + } + if initResponse.Redirect != nil { + // TODO follow redirection + } + if initResponse.Config != nil { + } + + // set up a loop to receive control messages + doneChan := make(chan error, 1) + go func() { + for { + in, err := stream.Recv() + if err != nil { + doneChan <- err + return + } + if in.Redirect != nil { + } + if in.Config != nil { + } + } + }() + + return stream, nil + +} + +func (p *Publisher) Publish(m *messaging_pb.Message) error { + hashValue := p.messageCount + p.messageCount++ + if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash { + if m.Key != nil { + hashValue = xxhash.Checksum64(m.Key) + } + } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash { + hashValue = xxhash.Checksum64(m.Key) + } else { + // round robin + } + + idx := int(hashValue) % len(p.publishClients) + if idx < 0 { + idx += len(p.publishClients) + } + return p.publishClients[idx].Send(&messaging_pb.PublishRequest{ + Data: m, + }) +} diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go new file mode 100644 index 000000000..6c7dc1ab7 --- /dev/null +++ b/weed/messaging/msgclient/subscriber.go @@ -0,0 +1,120 @@ +package msgclient + +import ( + "context" + "io" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "google.golang.org/grpc" +) + +type Subscriber struct { + subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient + subscriberCancels []context.CancelFunc + subscriberId string +} + +func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount) + subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount) + + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + if partitionId 
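Publish above chooses a partition per message: under NonNullKeyHash a keyed message is placed by xxhash while unkeyed messages fall back to the running counter (effectively round robin); under KeyHash every key is hashed, nil included. The index math needs the extra guard because converting a large uint64 hash to int can go negative. The same logic in isolation (a standalone sketch, not part of the commit):

// partitionIndex maps a 64-bit hash onto [0, partitions).
func partitionIndex(hashValue uint64, partitions int) int {
	idx := int(hashValue) % partitions
	if idx < 0 {
		// int(hashValue) is negative when the hash overflows int64,
		// and Go's % operator keeps the dividend's sign.
		idx += partitions
	}
	return idx
}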
>= 0 && i != partitionId { + continue + } + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime) + if err != nil { + return nil, err + } + subscriberClients[i] = client + subscriberCancels[i] = cancel + } + + return &Subscriber{ + subscriberClients: subscriberClients, + subscriberCancels: subscriberCancels, + subscriberId: subscriberId, + }, nil +} + +func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) { + stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx) + if err != nil { + return + } + + // send init message + err = stream.Send(&messaging_pb.SubscriberMessage{ + Init: &messaging_pb.SubscriberMessage_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP, + TimestampNs: startTime.UnixNano(), + SubscriberId: subscriberId, + }, + }) + if err != nil { + return + } + + return stream, nil +} + +func doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error { + for { + resp, listenErr := subscriberClient.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + println(listenErr.Error()) + return listenErr + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + processFn(resp.Data) + } +} + +// Subscribe starts goroutines to process the messages +func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) { + var wg sync.WaitGroup + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberClients[i] != nil { + wg.Add(1) + go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) { + defer wg.Done() + doSubscribe(subscriberClient, processFn) + }(s.subscriberClients[i]) + } + } + wg.Wait() +} + +func (s *Subscriber) Shutdown() { + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberCancels[i] != nil { + s.subscriberCancels[i]() + } + } +} diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index 4c1302abb..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -27,14 +27,14 @@ func (k *AwsSqsPub) GetName() string { return "aws_sqs" } -func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + 
configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 7f8765cc3..541a453e9 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -4,14 +4,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" - "github.com/spf13/viper" ) type MessageQueue interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error SendMessage(key string, message proto.Message) error } @@ -21,7 +20,7 @@ var ( Queue MessageQueue ) -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *util.ViperProxy, prefix string) { if config == nil { return @@ -30,9 +29,8 @@ func LoadConfiguration(config *viper.Viper) { validateOneEnabledQueue(config) for _, queue := range MessageQueues { - if config.GetBool(queue.GetName() + ".enabled") { - viperSub := config.Sub(queue.GetName()) - if err := queue.Initialize(viperSub); err != nil { + if config.GetBool(prefix + queue.GetName() + ".enabled") { + if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } @@ -44,7 +42,7 @@ func LoadConfiguration(config *viper.Viper) { } -func validateOneEnabledQueue(config *viper.Viper) { +func validateOneEnabledQueue(config *util.ViperProxy) { enabledQueue := "" for _, queue := range MessageQueues { if config.GetBool(queue.GetName() + ".enabled") { diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index ebf44ea6f..01c4d901f 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -17,23 +17,34 @@ package gocdk_pub_sub import ( "context" "fmt" + "github.com/golang/protobuf/proto" + "github.com/streadway/amqp" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + "gocloud.dev/pubsub/rabbitpubsub" + "net/url" + "path" + "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" - "gocloud.dev/pubsub" - _ "gocloud.dev/pubsub/awssnssqs" // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" _ "gocloud.dev/pubsub/rabbitpubsub" + "os" ) func init() { notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{}) } +func getPath(rawUrl string) string { + parsedUrl, _ := url.Parse(rawUrl) + return path.Join(parsedUrl.Host, parsedUrl.Path) +} + type GoCDKPubSub struct { topicURL string topic *pubsub.Topic @@ -43,14 +54,37 @@ func (k *GoCDKPubSub) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSub) Initialize(config util.Configuration) error { - k.topicURL = config.GetString("topic_url") +func (k *GoCDKPubSub) doReconnect() { + var conn *amqp.Connection + if k.topic.As(&conn) { + go func() { + <-conn.NotifyClose(make(chan *amqp.Error)) + conn.Close() + k.topic.Shutdown(context.Background()) + for { + glog.Info("Try reconnect") + conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) + if err == nil { + k.topic 
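The interface change here replaces the old viper sub-tree with a string prefix: LoadConfiguration now hands each queue the whole configuration plus a fully qualified prefix such as "notification.kafka.", and the queue prepends it to every key it reads. A hedged sketch of what a third-party queue looks like against the new contract (the stdout queue and its line_prefix key are invented for illustration):

package stdout_queue

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/notification"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/golang/protobuf/proto"
)

func init() {
	notification.MessageQueues = append(notification.MessageQueues, &StdoutQueue{})
}

type StdoutQueue struct {
	linePrefix string
}

func (q *StdoutQueue) GetName() string { return "stdout" }

// prefix arrives fully qualified, e.g. "notification.stdout.",
// so keys are read with it prepended instead of via a viper sub-tree.
func (q *StdoutQueue) Initialize(configuration util.Configuration, prefix string) error {
	q.linePrefix = configuration.GetString(prefix + "line_prefix")
	return nil
}

func (q *StdoutQueue) SendMessage(key string, message proto.Message) error {
	fmt.Printf("%s%s: %v\n", q.linePrefix, key, message)
	return nil
}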
= rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil) + k.doReconnect() + break + } + glog.Error(err) + time.Sleep(time.Second) + } + }() + } +} + +func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { + k.topicURL = configuration.GetString(prefix + "topic_url") glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) if err != nil { glog.Fatalf("Failed to open topic: %v", err) } k.topic = topic + k.doReconnect() return nil } @@ -59,8 +93,7 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { if err != nil { return err } - ctx := context.Background() - err = k.topic.Send(ctx, &pubsub.Message{ + err = k.topic.Send(context.Background(), &pubsub.Message{ Body: bytes, Metadata: map[string]string{"key": key}, }) diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index 7b26bfe38..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string { return "google_pub_sub" } -func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index fd545722b..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string { return "kafka" } -func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index dcc038dfc..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string { return "log" } -func (k *LogQueue) Initialize(configuration util.Configuration) (err error) { +func (k *LogQueue) 
Initialize(configuration util.Configuration, prefix string) (err error) { return nil } diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 2dfa44483..ffd3e4938 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,11 +3,14 @@ package operation import ( "context" "fmt" + "strings" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strings" ) type VolumeAssignRequest struct { @@ -15,6 +18,7 @@ type VolumeAssignRequest struct { Replication string Collection string Ttl string + DiskType string DataCenter string Rack string DataNode string @@ -30,7 +34,7 @@ type AssignResult struct { Auth security.EncodedJwt `json:"auth,omitempty"` } -func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { +func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { var requests []*VolumeAssignRequest requests = append(requests, primaryRequest) @@ -44,17 +48,18 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } - lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + lastError = WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ - Count: primaryRequest.Count, - Replication: primaryRequest.Replication, - Collection: primaryRequest.Collection, - Ttl: primaryRequest.Ttl, - DataCenter: primaryRequest.DataCenter, - Rack: primaryRequest.Rack, - DataNode: primaryRequest.DataNode, - WritableVolumeCount: primaryRequest.WritableVolumeCount, + Count: request.Count, + Replication: request.Replication, + Collection: request.Collection, + Ttl: request.Ttl, + DiskType: request.DiskType, + DataCenter: request.DataCenter, + Rack: request.Rack, + DataNode: request.DataNode, + WritableVolumeCount: request.WritableVolumeCount, } resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { @@ -81,6 +86,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } + break } return ret, lastError @@ -99,3 +105,44 @@ func LookupJwt(master string, fileId string) security.EncodedJwt { return security.EncodedJwt(tokenStr) } + +type StorageOption struct { + Replication string + DiskType string + Collection string + DataCenter string + Rack string + TtlSeconds int32 + Fsync bool + VolumeGrowthCount uint32 +} + +func (so *StorageOption) TtlString() string { + return needle.SecondsToTTL(so.TtlSeconds) +} + +func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, altRequest *VolumeAssignRequest) { + ar = &VolumeAssignRequest{ + Count: uint64(count), + Replication: so.Replication, + Collection: so.Collection, + Ttl: so.TtlString(), + DiskType: so.DiskType, + DataCenter: so.DataCenter, + Rack: so.Rack, + WritableVolumeCount: so.VolumeGrowthCount, + } + if so.DataCenter != "" || so.Rack != "" { + altRequest = &VolumeAssignRequest{ + Count: uint64(count), + Replication: so.Replication, + Collection: so.Collection, + Ttl: so.TtlString(), + DiskType: so.DiskType, + DataCenter: "", + Rack: "", + 
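Assign, like Lookup, SubmitFiles, and TailVolume further down, now takes a GetMasterFn callback instead of a fixed master string, so callers that track a leader can re-resolve it on every attempt while simple callers just close over a constant. A sketch of the simple case, assuming the security TOML has been loaded so LoadClientTLS works (the address and options are illustrative; ToAssignRequests, completed just below, shows how a StorageOption expands into the primary/alternate request pair):

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// A fixed master wrapped in a closure; a leader-aware caller would
	// return its current leader here instead.
	masterFn := func() string { return "localhost:9333" }
	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

	ar := &operation.VolumeAssignRequest{
		Count:       1,
		Replication: "001",
		DiskType:    "hdd",
	}
	result, err := operation.Assign(masterFn, grpcDialOption, ar)
	if err != nil {
		log.Fatalf("assign: %v", err)
	}
	// result.Url plus result.Fid is the upload target for the file body.
	fmt.Printf("assigned %s on %s\n", result.Fid, result.Url)
}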
WritableVolumeCount: so.VolumeGrowthCount, + } + } + return +} diff --git a/weed/operation/buffer_pool.go b/weed/operation/buffer_pool.go new file mode 100644 index 000000000..9cbe4787f --- /dev/null +++ b/weed/operation/buffer_pool.go @@ -0,0 +1,24 @@ +package operation + +import ( + "github.com/valyala/bytebufferpool" + "sync/atomic" +) + +var bufferCounter int64 + +func GetBuffer() *bytebufferpool.ByteBuffer { + defer func() { + atomic.AddInt64(&bufferCounter, 1) + // println("+", bufferCounter) + }() + return bytebufferpool.Get() +} + +func PutBuffer(buf *bytebufferpool.ByteBuffer) { + defer func() { + atomic.AddInt64(&bufferCounter, -1) + // println("-", bufferCounter) + }() + bytebufferpool.Put(buf) +} diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 295204dd8..8506e0518 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -8,11 +8,10 @@ import ( "io/ioutil" "net/http" "sort" + "sync" "google.golang.org/grpc" - "sync" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -41,23 +40,24 @@ type ChunkManifest struct { // seekable chunked file reader type ChunkedFileReader struct { - Manifest *ChunkManifest - Master string - pos int64 - pr *io.PipeReader - pw *io.PipeWriter - mutex sync.Mutex + totalSize int64 + chunkList []*ChunkInfo + master string + pos int64 + pr *io.PipeReader + pw *io.PipeWriter + mutex sync.Mutex } func (s ChunkList) Len() int { return len(s) } func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset } func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) { - if isGzipped { +func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) { + if isCompressed { var err error - if buffer, err = util.UnGzipData(buffer); err != nil { - return nil, err + if buffer, err = util.DecompressData(buffer); err != nil { + glog.V(0).Infof("fail to decompress chunk manifest: %v", err) } } cm := ChunkManifest{} @@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) { return json.Marshal(cm) } -func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error { +func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption) error { var fileIds []string for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFiles(master, grpcDialOption, fileIds) + results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) @@ -126,16 +126,29 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, return io.Copy(w, resp.Body) } +func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader { + var totalSize int64 + for _, chunk := range chunkList { + totalSize += chunk.Size + } + sort.Sort(ChunkList(chunkList)) + return &ChunkedFileReader{ + totalSize: totalSize, + chunkList: chunkList, + master: master, + } +} + func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { var err error switch whence { - case 0: - case 1: + case io.SeekStart: + case io.SeekCurrent: offset += cf.pos - case 2: - offset = cf.Manifest.Size - offset + case io.SeekEnd: + offset = cf.totalSize + offset } - if offset > cf.Manifest.Size { + if offset > cf.totalSize { err = ErrInvalidRange } if 
cf.pos != offset { @@ -146,10 +159,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { } func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { - cm := cf.Manifest chunkIndex := -1 chunkStartOffset := int64(0) - for i, ci := range cm.Chunks { + for i, ci := range cf.chunkList { if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size { chunkIndex = i chunkStartOffset = cf.pos - ci.Offset @@ -159,10 +171,12 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { if chunkIndex < 0 { return n, ErrInvalidRange } - for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ { - ci := cm.Chunks[chunkIndex] + for ; chunkIndex < len(cf.chunkList); chunkIndex++ { + ci := cf.chunkList[chunkIndex] // if we need read date from local volume server first? - fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid) + fileUrl, lookupError := LookupFileId(func() string { + return cf.master + }, ci.Fid) if lookupError != nil { return n, lookupError } diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 358399324..8f87882b1 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "google.golang.org/grpc" "net/http" "strings" "sync" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -28,10 +28,18 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { } // DeleteFiles batch deletes a list of fileIds -func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { - - lookupFunc := func(vids []string) (map[string]LookupResult, error) { - return LookupVolumeIds(master, grpcDialOption, vids) +func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { + + lookupFunc := func(vids []string) (results map[string]LookupResult, err error) { + results, err = LookupVolumeIds(masterFn, grpcDialOption, vids) + if err == nil && usePublicUrl { + for _, result := range results { + for _, loc := range result.Locations { + loc.Url = loc.PublicUrl + } + } + } + return } return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) @@ -92,9 +100,9 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str go func(server string, fidList []string) { defer wg.Done() - if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { + if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil { err = deleteErr - } else { + } else if deleteResults != nil { resultChan <- deleteResults } @@ -107,18 +115,17 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str ret = append(ret, result...) 
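With the manifest dependency gone, NewChunkedFileReader sorts the chunk list and totals the sizes up front, and the reader then satisfies io.Seeker and io.WriterTo directly. Reading the tail of a chunked file becomes (a sketch; the fids, sizes, and master address are made up):

package main

import (
	"io"
	"log"
	"os"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	chunks := []*operation.ChunkInfo{
		{Fid: "3,01637037d6", Offset: 0, Size: 1024},
		{Fid: "3,02b2f50a8e", Offset: 1024, Size: 2048},
	}
	reader := operation.NewChunkedFileReader(chunks, "localhost:9333")

	// Skip the first chunk, then stream the rest to stdout; WriteTo
	// resolves each chunk's volume location through LookupFileId.
	if _, err := reader.Seek(1024, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	if _, err := reader.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}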
} - glog.V(1).Infof("deleted %d items", len(ret)) - return ret, err } // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc -func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { +func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) { err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ - FileIds: fileIds, + FileIds: fileIds, + SkipCookieCheck: !includeCookie, } resp, err := volumeServerClient.BatchDelete(context.Background(), req) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index f6b2b69e9..025a65b38 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -1,27 +1,27 @@ package operation import ( - "context" "fmt" + "strconv" + "strings" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strconv" - "strings" ) func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { - ctx := context.Background() - grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { - return err + return fmt.Errorf("failed to parse volume server %v: %v", volumeServer, err) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) }, grpcAddress, grpcDialOption) @@ -40,16 +40,28 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - ctx := context.Background() - - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer) if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) + return fmt.Errorf("failed to parse master %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) }, masterGrpcAddress, grpcDialOption) } + +func WithFilerServerClient(filerServer string, grpcDialOption grpc.DialOption, fn func(masterClient filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(filerServer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer %v: %v", filerGrpcAddress, parseErr) + } + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} diff --git a/weed/operation/lookup.go 
b/weed/operation/lookup.go index d0773e7fd..0372e47b0 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -33,10 +33,10 @@ var ( vc VidCache // caching of volume locations, re-check if after 10 minutes ) -func Lookup(server string, vid string) (ret *LookupResult, err error) { +func Lookup(masterFn GetMasterFn, vid string) (ret *LookupResult, err error) { locations, cache_err := vc.Get(vid) if cache_err != nil { - if ret, err = do_lookup(server, vid); err == nil { + if ret, err = do_lookup(masterFn, vid); err == nil { vc.Set(vid, ret.Locations, 10*time.Minute) } } else { @@ -45,9 +45,10 @@ func Lookup(server string, vid string) (ret *LookupResult, err error) { return } -func do_lookup(server string, vid string) (*LookupResult, error) { +func do_lookup(masterFn GetMasterFn, vid string) (*LookupResult, error) { values := make(url.Values) values.Add("volumeId", vid) + server := masterFn() jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values) if err != nil { return nil, err @@ -63,12 +64,12 @@ func do_lookup(server string, vid string) (*LookupResult, error) { return &ret, nil } -func LookupFileId(server string, fileId string) (fullUrl string, err error) { +func LookupFileId(masterFn GetMasterFn, fileId string) (fullUrl string, err error) { parts := strings.Split(fileId, ",") if len(parts) != 2 { return "", errors.New("Invalid fileId " + fileId) } - lookup, lookupError := Lookup(server, parts[0]) + lookup, lookupError := Lookup(masterFn, parts[0]) if lookupError != nil { return "", lookupError } @@ -79,7 +80,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) { } // LookupVolumeIds find volume locations by cache and actual lookup -func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { +func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { ret := make(map[string]LookupResult) var unknown_vids []string @@ -99,7 +100,7 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin //only query unknown_vids - err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err := WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go new file mode 100644 index 000000000..202374e1b --- /dev/null +++ b/weed/operation/needle_parse_test.go @@ -0,0 +1,131 @@ +package operation + +import ( + "bytes" + "fmt" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MockClient struct { + needleHandling func(n *needle.Needle, originalSize int, e error) +} + +func (m *MockClient) Do(req *http.Request) (*http.Response, error) { + n, originalSize, _, err := needle.CreateNeedleFromRequest(req, false, 1024*1024) + if m.needleHandling != nil { + m.needleHandling(n, originalSize, err) + } + return &http.Response{ + StatusCode: http.StatusNoContent, + }, io.EOF +} + +/* + +The mime type is always the value passed in. + +Compress or not depends on the content detection, file name extension, and compression ratio. + +If the content is already compressed, need to know the content size. 
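Back in lookup.go above: Lookup consults the vc VidCache first and only falls through to do_lookup, an HTTP /dir/lookup against whichever master masterFn currently returns, caching locations for ten minutes. End to end, turning a file id into a fetchable URL looks like this fragment (assuming operation, fmt, and log are imported; the file id is illustrative):

masterFn := func() string { return "localhost:9333" }

fullUrl, err := operation.LookupFileId(masterFn, "3,01637037d6")
if err != nil {
	log.Fatal(err)
}
// fullUrl is "http://<volume server>/<file id>"; a later lookup for the
// same volume id is answered from the cache without asking a master.
fmt.Println(fullUrl)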
+ +*/ + +func TestCreateNeedleFromRequest(t *testing.T) { + mc := &MockClient{} + tmp := HttpClient + HttpClient = mc + defer func() { + HttpClient = tmp + }() + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") + fmt.Printf("needle: %v, originalSize: %d\n", n, originalSize) + } + uploadResult, err, data := Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader([]byte(textContent)), false, "", nil, "") + if len(data) != len(textContent) { + t.Errorf("data actual %d expected %d", len(data), len(textContent)) + } + if err != nil { + fmt.Printf("err: %v\n", err) + } + fmt.Printf("uploadResult: %+v\n", uploadResult) + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + gzippedData, _ := util.GzipData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(gzippedData), true, "text/plain", nil, "") + } + + /* + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsZstdContent(n.Data), "this should be zstd") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + zstdData, _ := util.ZstdData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), true, "text/plain", nil, "") + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "application/zstd", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, false, n.IsCompressed(), "this should not be compressed") + assert.Equal(t, true, util.IsZstdContent(n.Data), "this should still be zstd") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + zstdData, _ := util.ZstdData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), false, "application/zstd", nil, "") + } + */ + +} + +var textContent = `Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +` diff --git a/weed/operation/stats.go b/weed/operation/stats.go deleted file mode 100644 index b69a33750..000000000 --- a/weed/operation/stats.go +++ /dev/null @@ -1,26 +0,0 @@ -package operation - -import ( - "context" - "google.golang.org/grpc" - - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" -) - -func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - - err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - - grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) - if grpcErr != nil { - return grpcErr - } - - resp = grpcResponse - - return nil - - }) - - return -} diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 62f067430..87c5e4279 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -1,8 +1,6 @@ package operation import ( - "bytes" - "google.golang.org/grpc" "io" "mime" "net/url" @@ -11,6 +9,8 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" ) @@ -25,20 +25,23 @@ type FilePart struct { Collection string DataCenter string Ttl string + DiskType string Server string //this comes from assign result Fid string //this comes from assign result, but customizable + Fsync bool } type SubmitResult struct { FileName string `json:"fileName,omitempty"` - FileUrl string `json:"fileUrl,omitempty"` + FileUrl string `json:"url,omitempty"` Fid string `json:"fid,omitempty"` Size uint32 `json:"size,omitempty"` Error string `json:"error,omitempty"` } -func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, - replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) { +type GetMasterFn func() string + +func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName @@ -49,10 +52,11 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart Collection: collection, DataCenter: dataCenter, Ttl: ttl, + DiskType: diskType, } - ret, err := Assign(master, grpcDialOption, ar) + ret, err := Assign(masterFn, grpcDialOption, ar) if err != nil { - for index, _ := range files { + for index := range files { results[index].Error = 
err.Error() } return results, err @@ -63,10 +67,15 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart file.Fid = file.Fid + "_" + strconv.Itoa(index) } file.Server = ret.Url + if usePublicUrl { + file.Server = ret.PublicUrl + } file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption) + file.Ttl = ttl + file.DiskType = diskType + results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -109,11 +118,14 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { +func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) } + if fi.Fsync { + fileUrl += "?fsync=true" + } if closer, ok := fi.Reader.(io.Closer); ok { defer closer.Close() } @@ -136,8 +148,9 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp Replication: fi.Replication, Collection: fi.Collection, Ttl: fi.Ttl, + DiskType: fi.DiskType, } - ret, err = Assign(master, grpcDialOption, ar) + ret, err = Assign(masterFn, grpcDialOption, ar) if err != nil { return } @@ -149,11 +162,12 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp Replication: fi.Replication, Collection: fi.Collection, Ttl: fi.Ttl, + DiskType: fi.DiskType, } - ret, err = Assign(master, grpcDialOption, ar) + ret, err = Assign(masterFn, grpcDialOption, ar) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) return } id = ret.Fid @@ -164,14 +178,17 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp } } fileUrl := "http://" + ret.Url + "/" + id + if usePublicUrl { + fileUrl = "http://" + ret.PublicUrl + "/" + id + } count, e := upload_one_chunk( baseName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(fi.Reader, chunkSize), - master, fileUrl, + masterFn, fileUrl, ret.Auth) if e != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) return 0, e } cm.Chunks = append(cm.Chunks, @@ -186,10 +203,10 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) } } else { - ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) + ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt) if e != nil { return 0, e } @@ -198,12 +215,11 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp return } -func upload_one_chunk(filename string, reader io.Reader, master, +func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") - uploadResult, 
uploadError := Upload(fileUrl, filename, reader, false, - "", nil, jwt) + uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt) if uploadError != nil { return 0, uploadError } @@ -215,12 +231,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s if e != nil { return e } - bufReader := bytes.NewReader(buf) glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") u, _ := url.Parse(fileUrl) q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt) + _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index b53f18ce1..045948274 100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -5,14 +5,15 @@ import ( "fmt" "io" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "google.golang.org/grpc" ) -func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { +func TailVolume(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { // find volume location, replication, ttl info - lookup, err := Lookup(master, vid.String()) + lookup, err := Lookup(masterFn, vid.String()) if err != nil { return fmt.Errorf("look up volume %d: %v", vid, err) } @@ -27,8 +28,10 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{ + stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{ VolumeId: uint32(vid), SinceNs: sinceNs, IdleTimeoutSeconds: uint32(idleTimeoutSeconds), diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index c387d0230..944186eeb 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,10 +2,7 @@ package operation import ( "bytes" - "compress/flate" - "compress/gzip" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -15,73 +12,188 @@ import ( "net/textproto" "path/filepath" "strings" + "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) type UploadResult struct { - Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` - Error string `json:"error,omitempty"` - ETag string `json:"eTag,omitempty"` + Name string `json:"name,omitempty"` + Size uint32 `json:"size,omitempty"` + Error string `json:"error,omitempty"` + ETag string `json:"eTag,omitempty"` + CipherKey []byte `json:"cipherKey,omitempty"` + Mime string `json:"mime,omitempty"` + Gzip uint32 `json:"gzip,omitempty"` + ContentMd5 string `json:"contentMd5,omitempty"` + 
RetryCount int `json:"-"` +} + +func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk { + fid, _ := filer_pb.ToFileIdObject(fileId) + return &filer_pb.FileChunk{ + FileId: fileId, + Offset: offset, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsCompressed: uploadResult.Gzip > 0, + Fid: fid, + } +} + +// HTTPClient interface for testing +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) } var ( - client *http.Client + HttpClient HTTPClient ) func init() { - client = &http.Client{Transport: &http.Transport{ + HttpClient = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, }} } -var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") +var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`) // Upload sends a POST request to a volume server to upload the content with adjustable compression level -func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { - if compressionLevel < 1 { - compressionLevel = 1 - } - if compressionLevel > 9 { - compressionLevel = 9 - } - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) +func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + uploadResult, err = retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + return } // Upload sends a POST request to a volume server to upload the content with fast compression -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) { + uploadResult, err, data = doUpload(uploadUrl, filename, cipher, reader, isInputCompressed, mtype, pairMap, jwt) + return +} + +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) { + bytesReader, ok := reader.(*util.BytesReader) + if ok { + data = bytesReader.Bytes + } else { + data, err = ioutil.ReadAll(reader) + if err != nil { + err = fmt.Errorf("read input: %v", err) + return + } + } + uploadResult, uploadErr := retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + return uploadResult, uploadErr, data } -func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { - contentIsGzipped := isGzipped +func retriedUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + for i := 0; i 
< 3; i++ { + uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + if err == nil { + uploadResult.RetryCount = i + return + } else { + glog.Warningf("uploading to %s: %v", uploadUrl, err) + } + time.Sleep(time.Millisecond * time.Duration(237*(i+1))) + } + return +} + +func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + contentIsGzipped := isInputCompressed shouldGzipNow := false - if !isGzipped { - if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped { + if !isInputCompressed { + if mtype == "" { + mtype = http.DetectContentType(data) + // println("detect1 mimetype to", mtype) + if mtype == "application/octet-stream" { + mtype = "" + } + } + if shouldBeCompressed, iAmSure := util.IsCompressableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeCompressed { shouldGzipNow = true + } else if !iAmSure && mtype == "" && len(data) > 16*1024 { + var compressed []byte + compressed, err = util.GzipData(data[0:128]) + shouldGzipNow = len(compressed)*10 < 128*9 // can not compress to less than 90% + } + } + + var clearDataLen int + + // gzip if possible + // this could be double copying + clearDataLen = len(data) + clearData := data + if shouldGzipNow && !cipher { + compressed, compressErr := util.GzipData(data) + // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed)) + if compressErr == nil { + data = compressed contentIsGzipped = true } + } else if isInputCompressed { + // just to get the clear data length + clearData, err = util.DecompressData(data) + if err == nil { + clearDataLen = len(clearData) + } } - return upload_content(uploadUrl, func(w io.Writer) (err error) { - if shouldGzipNow { - gzWriter, _ := gzip.NewWriterLevel(w, compression) - _, err = io.Copy(gzWriter, reader) - gzWriter.Close() - } else { - _, err = io.Copy(w, reader) + + if cipher { + // encrypt(gzip(data)) + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(clearData, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return } - return - }, filename, contentIsGzipped, mtype, pairMap, jwt) + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(encryptedData) + return + }, "", false, len(encryptedData), "", nil, jwt) + if uploadResult == nil { + return + } + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + uploadResult.Size = uint32(clearDataLen) + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(data) + return + }, filename, contentIsGzipped, len(data), mtype, pairMap, jwt) + if uploadResult == nil { + return + } + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 + } + } + + return uploadResult, err } -func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - body_buf := bytes.NewBufferString("") - body_writer := multipart.NewWriter(body_buf) +func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, 
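The auto-compression decision above is worth restating: when the caller declares no mime type, the content is sniffed with http.DetectContentType; when the type is still unknown and the payload exceeds 16KB, the first 128 bytes are trial-gzipped, and compression is kept only if that sample drops below 90% of its size, which is what the len(compressed)*10 < 128*9 test encodes. The probe as a standalone helper (a sketch mirroring those constants, with the GzipData error handled here rather than ignored):

// shouldTrialGzip mirrors doUploadData's heuristic: only content over
// 16KB with no recognizable mime type is probed, and gzip is kept only
// when the 128-byte sample compresses below 90% of its size.
func shouldTrialGzip(data []byte) bool {
	if len(data) <= 16*1024 {
		return false
	}
	compressed, err := util.GzipData(data[:128])
	if err != nil {
		return false
	}
	return len(compressed)*10 < 128*9
}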
originalDataSize int, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + buf := GetBuffer() + defer PutBuffer(buf) + body_writer := multipart.NewWriter(buf) h := make(textproto.MIMEHeader) h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, fileNameEscaper.Replace(filename))) + h.Set("Idempotency-Key", uploadUrl) if mtype == "" { mtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename))) } @@ -107,10 +219,10 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error return nil, err } - req, postErr := http.NewRequest("POST", uploadUrl, body_buf) + req, postErr := http.NewRequest("POST", uploadUrl, bytes.NewReader(buf.Bytes())) if postErr != nil { - glog.V(0).Infoln("failing to upload to", uploadUrl, postErr.Error()) - return nil, postErr + glog.V(1).Infof("create upload request %s: %v", uploadUrl, postErr) + return nil, fmt.Errorf("create upload request %s: %v", uploadUrl, postErr) } req.Header.Set("Content-Type", content_type) for k, v := range pairMap { @@ -119,27 +231,42 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if jwt != "" { req.Header.Set("Authorization", "BEARER "+string(jwt)) } - resp, post_err := client.Do(req) + // print("+") + resp, post_err := HttpClient.Do(req) if post_err != nil { - glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) - return nil, post_err + if strings.Contains(post_err.Error(), "connection reset by peer") || + strings.Contains(post_err.Error(), "use of closed network connection") { + resp, post_err = HttpClient.Do(req) + } } - defer resp.Body.Close() + if post_err != nil { + return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err) + } + // print("-") + defer util.CloseResponse(resp) + + var ret UploadResult etag := getEtag(resp) + if resp.StatusCode == http.StatusNoContent { + ret.ETag = etag + return &ret, nil + } + resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { - return nil, ra_err + return nil, fmt.Errorf("read response body %v: %v", uploadUrl, ra_err) } - var ret UploadResult + unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { - glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body)) - return nil, unmarshal_err + glog.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body)) + return nil, fmt.Errorf("unmarshal %v: %v", uploadUrl, unmarshal_err) } if ret.Error != "" { - return nil, errors.New(ret.Error) + return nil, fmt.Errorf("unmarshalled error %v: %v", uploadUrl, ret.Error) } ret.ETag = etag + ret.ContentMd5 = resp.Header.Get("Content-MD5") return &ret, nil } diff --git a/weed/pb/Makefile b/weed/pb/Makefile index c50410574..d2618937b 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -3,8 +3,10 @@ all: gen .PHONY : gen gen: - protoc master.proto --go_out=plugins=grpc:./master_pb - protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb - protoc filer.proto --go_out=plugins=grpc:./filer_pb + protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative + protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative + protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative + protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative + protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative # protoc filer.proto 
--java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index ef847cbe7..ac4c9a0e7 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package filer_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -21,6 +22,9 @@ service SeaweedFiler { rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) { } + rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) { + } + rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } @@ -33,6 +37,9 @@ service SeaweedFiler { rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } + rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } @@ -42,6 +49,24 @@ service SeaweedFiler { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { } + rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + + rpc KvGet (KvGetRequest) returns (KvGetResponse) { + } + + rpc KvPut (KvPutRequest) returns (KvPutResponse) { + } + } ////////////////////////////////////////////////// @@ -73,6 +98,9 @@ message Entry { repeated FileChunk chunks = 3; FuseAttributes attributes = 4; map<string, bytes> extended = 5; + bytes hard_link_id = 7; + int32 hard_link_counter = 8; // only exists in hard link meta data + bytes content = 9; // if not empty, the file content } message FullEntry { @@ -85,6 +113,8 @@ message EventNotification { Entry new_entry = 2; bool delete_chunks = 3; string new_parent_path = 4; + bool is_from_other_cluster = 5; + repeated int32 signatures = 6; } message FileChunk { @@ -96,6 +126,13 @@ message FileChunk { string source_file_id = 6; // to be deprecated FileId fid = 7; FileId source_fid = 8; + bytes cipher_key = 9; + bool is_compressed = 10; + bool is_chunk_manifest = 11; // content is a list of FileChunks +} + +message FileChunkManifest { + repeated FileChunk chunks = 1; } message FileId { @@ -118,23 +155,39 @@ message FuseAttributes { string user_name = 11; // for hdfs repeated string group_name = 12; // for hdfs string symlink_target = 13; + bytes md5 = 14; + string disk_type = 15; } message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; + bool is_from_other_cluster = 4; + repeated int32 signatures = 5; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { string directory = 1; Entry entry = 2; + bool is_from_other_cluster = 3; + repeated int32 signatures = 4; } message UpdateEntryResponse { } +message AppendToEntryRequest { + string directory = 1; + string entry_name = 2; + repeated FileChunk chunks = 3; +} +message AppendToEntryResponse { +} + message DeleteEntryRequest { string directory = 1; string name = 2; @@ -142,9 +195,12 @@ message DeleteEntryRequest { bool is_delete_data = 4; bool is_recursive = 5; bool ignore_recursive_error = 6; + bool is_from_other_cluster = 7; + repeated int32 signatures = 8; } message 
 message DeleteEntryResponse {
+    string error = 1;
 }
 
 message AtomicRenameEntryRequest {
@@ -163,6 +219,9 @@ message AssignVolumeRequest {
     string replication = 3;
     int32 ttl_sec = 4;
     string data_center = 5;
+    string path = 6;
+    string rack = 7;
+    string disk_type = 8;
 }
 
 message AssignVolumeResponse {
@@ -171,6 +230,9 @@ message AssignVolumeResponse {
     string public_url = 3;
     int32 count = 4;
     string auth = 5;
+    string collection = 6;
+    string replication = 7;
+    string error = 8;
 }
 
 message LookupVolumeRequest {
@@ -189,6 +251,16 @@ message LookupVolumeResponse {
     map<string, Locations> locations_map = 1;
 }
 
+message Collection {
+    string name = 1;
+}
+message CollectionListRequest {
+    bool include_normal_volumes = 1;
+    bool include_ec_volumes = 2;
+}
+message CollectionListResponse {
+    repeated Collection collections = 1;
+}
 message DeleteCollectionRequest {
     string collection = 1;
 }
@@ -200,11 +272,9 @@ message StatisticsRequest {
     string replication = 1;
     string collection = 2;
     string ttl = 3;
+    string disk_type = 4;
 }
 message StatisticsResponse {
-    string replication = 1;
-    string collection = 2;
-    string ttl = 3;
     uint64 total_size = 4;
     uint64 used_size = 5;
     uint64 file_count = 6;
@@ -217,4 +287,80 @@ message GetFilerConfigurationResponse {
     string replication = 2;
     string collection = 3;
     uint32 max_mb = 4;
+    string dir_buckets = 5;
+    bool cipher = 7;
+    int32 signature = 8;
+    string metrics_address = 9;
+    int32 metrics_interval_sec = 10;
+}
+
+message SubscribeMetadataRequest {
+    string client_name = 1;
+    string path_prefix = 2;
+    int64 since_ns = 3;
+    int32 signature = 4;
+}
+message SubscribeMetadataResponse {
+    string directory = 1;
+    EventNotification event_notification = 2;
+    int64 ts_ns = 3;
+}
+
+message LogEntry {
+    int64 ts_ns = 1;
+    int32 partition_key_hash = 2;
+    bytes data = 3;
+}
+
+message KeepConnectedRequest {
+    string name = 1;
+    uint32 grpc_port = 2;
+    repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+    string resource = 1;
+}
+message LocateBrokerResponse {
+    bool found = 1;
+    // if found, send the exact address
+    // if not found, send the full list of existing brokers
+    message Resource {
+        string grpc_addresses = 1;
+        int32 resource_count = 2;
+    }
+    repeated Resource resources = 2;
+}
+
+// Key-Value operations
+message KvGetRequest {
+    bytes key = 1;
+}
+message KvGetResponse {
+    bytes value = 1;
+    string error = 2;
+}
+message KvPutRequest {
+    bytes key = 1;
+    bytes value = 2;
+}
+message KvPutResponse {
+    string error = 1;
+}
+
+// path-based configurations
+message FilerConf {
+    int32 version = 1;
+    message PathConf {
+        string location_prefix = 1;
+        string collection = 2;
+        string replication = 3;
+        string ttl = 4;
+        string disk_type = 5;
+        bool fsync = 6;
+        uint32 volume_growth_count = 7;
+    }
+    repeated PathConf locations = 2;
 }
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index c8214aa94..902c39514 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -1,1064 +1,4535 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.25.0
+// protoc v3.12.3
 // source: filer.proto
-// DO NOT EDIT!
-
-/*
-Package filer_pb is a generated protocol buffer package.
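A note on the streaming additions above: SubscribeMetadata and SubscribeLocalMetadata are server-streaming RPCs, so a consumer dials the filer and keeps reading SubscribeMetadataResponse messages until the stream ends. A minimal subscriber sketch against the generated bindings; the localhost:18888 address and the zero replay offset are assumptions for illustration, not part of this diff:

package main

import (
	"context"
	"io"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := filer_pb.NewSeaweedFilerClient(conn)

	// Replay all metadata events under /buckets from the start of the log.
	stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
		ClientName: "example-subscriber",
		PathPrefix: "/buckets",
		SinceNs:    0,
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return // server closed the stream
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("dir=%s ts=%d event=%v", resp.Directory, resp.TsNs, resp.EventNotification)
	}
}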
- -It is generated from these files: - filer.proto - -It has these top-level messages: - LookupDirectoryEntryRequest - LookupDirectoryEntryResponse - ListEntriesRequest - ListEntriesResponse - Entry - FullEntry - EventNotification - FileChunk - FileId - FuseAttributes - CreateEntryRequest - CreateEntryResponse - UpdateEntryRequest - UpdateEntryResponse - DeleteEntryRequest - DeleteEntryResponse - AtomicRenameEntryRequest - AtomicRenameEntryResponse - AssignVolumeRequest - AssignVolumeResponse - LookupVolumeRequest - Locations - Location - LookupVolumeResponse - DeleteCollectionRequest - DeleteCollectionResponse - StatisticsRequest - StatisticsResponse - GetFilerConfigurationRequest - GetFilerConfigurationResponse -*/ -package filer_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package filer_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
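The regenerated bindings keep everything on the single SeaweedFilerClient interface, so the new unary RPCs are called like the old ones. A hedged round trip through KvPut/KvGet, reusing the conn and imports from the subscriber sketch above (the key and value are illustrative):

client := filer_pb.NewSeaweedFilerClient(conn)

// KvPut stores an opaque key/value pair on the filer.
if _, err := client.KvPut(context.Background(), &filer_pb.KvPutRequest{
	Key:   []byte("greeting"),
	Value: []byte("hello"),
}); err != nil {
	log.Fatal(err)
}

// KvGet reads it back; note the response also carries an in-band Error string.
resp, err := client.KvGet(context.Background(), &filer_pb.KvGetRequest{Key: []byte("greeting")})
if err != nil {
	log.Fatal(err)
}
log.Printf("value=%s error=%q", resp.GetValue(), resp.GetError())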
+const _ = proto.ProtoPackageIsVersion4 type LookupDirectoryEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *LookupDirectoryEntryRequest) Reset() { + *x = LookupDirectoryEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LookupDirectoryEntryRequest) Reset() { *m = LookupDirectoryEntryRequest{} } -func (m *LookupDirectoryEntryRequest) String() string { return proto.CompactTextString(m) } -func (*LookupDirectoryEntryRequest) ProtoMessage() {} -func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *LookupDirectoryEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupDirectoryEntryRequest) ProtoMessage() {} -func (m *LookupDirectoryEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *LookupDirectoryEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupDirectoryEntryRequest.ProtoReflect.Descriptor instead. +func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{0} +} + +func (x *LookupDirectoryEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *LookupDirectoryEntryRequest) GetName() string { - if m != nil { - return m.Name +func (x *LookupDirectoryEntryRequest) GetName() string { + if x != nil { + return x.Name } return "" } type LookupDirectoryEntryResponse struct { - Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *LookupDirectoryEntryResponse) Reset() { + *x = LookupDirectoryEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LookupDirectoryEntryResponse) Reset() { *m = LookupDirectoryEntryResponse{} } -func (m *LookupDirectoryEntryResponse) String() string { return proto.CompactTextString(m) } -func (*LookupDirectoryEntryResponse) ProtoMessage() {} -func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *LookupDirectoryEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupDirectoryEntryResponse) ProtoMessage() {} + +func (x *LookupDirectoryEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use LookupDirectoryEntryResponse.ProtoReflect.Descriptor instead. +func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{1} +} -func (m *LookupDirectoryEntryResponse) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *LookupDirectoryEntryResponse) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type ListEntriesRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Prefix string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"` - StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName" json:"startFromFileName,omitempty"` - InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom" json:"inclusiveStartFrom,omitempty"` - Limit uint32 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"` + InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"` + Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *ListEntriesRequest) Reset() { + *x = ListEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesRequest) ProtoMessage() {} + +func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} } -func (m *ListEntriesRequest) String() string { return proto.CompactTextString(m) } -func (*ListEntriesRequest) ProtoMessage() {} -func (*ListEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead. 
+func (*ListEntriesRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{2} +} -func (m *ListEntriesRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *ListEntriesRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *ListEntriesRequest) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *ListEntriesRequest) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } -func (m *ListEntriesRequest) GetStartFromFileName() string { - if m != nil { - return m.StartFromFileName +func (x *ListEntriesRequest) GetStartFromFileName() string { + if x != nil { + return x.StartFromFileName } return "" } -func (m *ListEntriesRequest) GetInclusiveStartFrom() bool { - if m != nil { - return m.InclusiveStartFrom +func (x *ListEntriesRequest) GetInclusiveStartFrom() bool { + if x != nil { + return x.InclusiveStartFrom } return false } -func (m *ListEntriesRequest) GetLimit() uint32 { - if m != nil { - return m.Limit +func (x *ListEntriesRequest) GetLimit() uint32 { + if x != nil { + return x.Limit } return 0 } type ListEntriesResponse struct { - Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *ListEntriesResponse) Reset() { + *x = ListEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesResponse) ProtoMessage() {} + +func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} } -func (m *ListEntriesResponse) String() string { return proto.CompactTextString(m) } -func (*ListEntriesResponse) ProtoMessage() {} -func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead. 
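The ListEntriesRequest fields above give cursor-style pagination: StartFromFileName with InclusiveStartFrom resumes a listing, and Limit caps a page. A sketch of draining a directory page by page (a fragment; client and ctx as in the earlier sketches, and the resume logic is my reading of the fields, not taken from this diff):

lastName := ""
for {
	stream, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
		Directory:          "/buckets/demo",
		StartFromFileName:  lastName,
		InclusiveStartFrom: lastName == "", // only the first page starts inclusively
		Limit:              1024,
	})
	if err != nil {
		log.Fatal(err)
	}
	received := 0
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		lastName = resp.GetEntry().GetName()
		received++
	}
	if received == 0 {
		break // past the last entry
	}
}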
+func (*ListEntriesResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{3} +} -func (m *ListEntriesResponse) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *ListEntriesResponse) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type Entry struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"` - Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"` - Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"` - Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` + Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + HardLinkId []byte `protobuf:"bytes,7,opt,name=hard_link_id,json=hardLinkId,proto3" json:"hard_link_id,omitempty"` + HardLinkCounter int32 `protobuf:"varint,8,opt,name=hard_link_counter,json=hardLinkCounter,proto3" json:"hard_link_counter,omitempty"` // only exists in hard link meta data + Content []byte `protobuf:"bytes,9,opt,name=content,proto3" json:"content,omitempty"` // if not empty, the file content +} + +func (x *Entry) Reset() { + *x = Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Entry) ProtoMessage() {} -func (m *Entry) GetName() string { - if m != nil { - return m.Name +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. 
+func (*Entry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{4} +} + +func (x *Entry) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Entry) GetIsDirectory() bool { - if m != nil { - return m.IsDirectory +func (x *Entry) GetIsDirectory() bool { + if x != nil { + return x.IsDirectory } return false } -func (m *Entry) GetChunks() []*FileChunk { - if m != nil { - return m.Chunks +func (x *Entry) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks + } + return nil +} + +func (x *Entry) GetAttributes() *FuseAttributes { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Entry) GetExtended() map[string][]byte { + if x != nil { + return x.Extended } return nil } -func (m *Entry) GetAttributes() *FuseAttributes { - if m != nil { - return m.Attributes +func (x *Entry) GetHardLinkId() []byte { + if x != nil { + return x.HardLinkId } return nil } -func (m *Entry) GetExtended() map[string][]byte { - if m != nil { - return m.Extended +func (x *Entry) GetHardLinkCounter() int32 { + if x != nil { + return x.HardLinkCounter + } + return 0 +} + +func (x *Entry) GetContent() []byte { + if x != nil { + return x.Content } return nil } type FullEntry struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *FullEntry) Reset() { + *x = FullEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FullEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FullEntry) ProtoMessage() {} + +func (x *FullEntry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *FullEntry) Reset() { *m = FullEntry{} } -func (m *FullEntry) String() string { return proto.CompactTextString(m) } -func (*FullEntry) ProtoMessage() {} -func (*FullEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +// Deprecated: Use FullEntry.ProtoReflect.Descriptor instead. 
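Entry grows three fields in this change: hard_link_id and hard_link_counter let multiple directory entries share one piece of metadata, and content lets small files ride inline in the entry instead of in chunks. The generated getters stay nil-safe, which callers can lean on, e.g.:

var e *filer_pb.Entry      // nil on purpose
_ = e.GetName()            // "" rather than a panic
_ = e.GetHardLinkCounter() // 0 rather than a panic

e = &filer_pb.Entry{Name: "notes.txt", Content: []byte("inline body")}
if len(e.GetContent()) > 0 {
	// Serve bytes directly; no chunk lookup is needed for inline content.
}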
+func (*FullEntry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{5} +} -func (m *FullEntry) GetDir() string { - if m != nil { - return m.Dir +func (x *FullEntry) GetDir() string { + if x != nil { + return x.Dir } return "" } -func (m *FullEntry) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *FullEntry) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type EventNotification struct { - OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` - NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` - DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` - NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath" json:"new_parent_path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"` + NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"` + DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"` + NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,6,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` +} + +func (x *EventNotification) Reset() { + *x = EventNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventNotification) ProtoMessage() {} + +func (x *EventNotification) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *EventNotification) Reset() { *m = EventNotification{} } -func (m *EventNotification) String() string { return proto.CompactTextString(m) } -func (*EventNotification) ProtoMessage() {} -func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +// Deprecated: Use EventNotification.ProtoReflect.Descriptor instead. 
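is_from_other_cluster and signatures stamp an event with the clusters that have already handled it, which is what lets active-active replication avoid echoing an event back to its origin. A hypothetical filter in that spirit (shouldReplicate and mySignature are illustrative names, not part of this diff):

// shouldReplicate reports whether an event still needs to be forwarded.
func shouldReplicate(ev *filer_pb.EventNotification, mySignature int32) bool {
	for _, sig := range ev.GetSignatures() {
		if sig == mySignature {
			return false // this cluster produced or already processed the event
		}
	}
	return true
}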
+func (*EventNotification) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{6} +} -func (m *EventNotification) GetOldEntry() *Entry { - if m != nil { - return m.OldEntry +func (x *EventNotification) GetOldEntry() *Entry { + if x != nil { + return x.OldEntry } return nil } -func (m *EventNotification) GetNewEntry() *Entry { - if m != nil { - return m.NewEntry +func (x *EventNotification) GetNewEntry() *Entry { + if x != nil { + return x.NewEntry } return nil } -func (m *EventNotification) GetDeleteChunks() bool { - if m != nil { - return m.DeleteChunks +func (x *EventNotification) GetDeleteChunks() bool { + if x != nil { + return x.DeleteChunks } return false } -func (m *EventNotification) GetNewParentPath() string { - if m != nil { - return m.NewParentPath +func (x *EventNotification) GetNewParentPath() string { + if x != nil { + return x.NewParentPath } return "" } +func (x *EventNotification) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} + +func (x *EventNotification) GetSignatures() []int32 { + if x != nil { + return x.Signatures + } + return nil +} + type FileChunk struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` - Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` - Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"` - ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"` - SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` - Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"` - SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated + Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"` + ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` + SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated + Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"` + SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"` + CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` + IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"` + IsChunkManifest bool `protobuf:"varint,11,opt,name=is_chunk_manifest,json=isChunkManifest,proto3" json:"is_chunk_manifest,omitempty"` // content is a list of FileChunks +} + +func (x *FileChunk) Reset() { + *x = FileChunk{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileChunk) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *FileChunk) Reset() { *m = FileChunk{} } -func (m *FileChunk) String() string { return 
proto.CompactTextString(m) } -func (*FileChunk) ProtoMessage() {} -func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*FileChunk) ProtoMessage() {} -func (m *FileChunk) GetFileId() string { - if m != nil { - return m.FileId +func (x *FileChunk) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileChunk.ProtoReflect.Descriptor instead. +func (*FileChunk) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{7} +} + +func (x *FileChunk) GetFileId() string { + if x != nil { + return x.FileId } return "" } -func (m *FileChunk) GetOffset() int64 { - if m != nil { - return m.Offset +func (x *FileChunk) GetOffset() int64 { + if x != nil { + return x.Offset } return 0 } -func (m *FileChunk) GetSize() uint64 { - if m != nil { - return m.Size +func (x *FileChunk) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *FileChunk) GetMtime() int64 { - if m != nil { - return m.Mtime +func (x *FileChunk) GetMtime() int64 { + if x != nil { + return x.Mtime } return 0 } -func (m *FileChunk) GetETag() string { - if m != nil { - return m.ETag +func (x *FileChunk) GetETag() string { + if x != nil { + return x.ETag } return "" } -func (m *FileChunk) GetSourceFileId() string { - if m != nil { - return m.SourceFileId +func (x *FileChunk) GetSourceFileId() string { + if x != nil { + return x.SourceFileId } return "" } -func (m *FileChunk) GetFid() *FileId { - if m != nil { - return m.Fid +func (x *FileChunk) GetFid() *FileId { + if x != nil { + return x.Fid + } + return nil +} + +func (x *FileChunk) GetSourceFid() *FileId { + if x != nil { + return x.SourceFid } return nil } -func (m *FileChunk) GetSourceFid() *FileId { - if m != nil { - return m.SourceFid +func (x *FileChunk) GetCipherKey() []byte { + if x != nil { + return x.CipherKey + } + return nil +} + +func (x *FileChunk) GetIsCompressed() bool { + if x != nil { + return x.IsCompressed + } + return false +} + +func (x *FileChunk) GetIsChunkManifest() bool { + if x != nil { + return x.IsChunkManifest + } + return false +} + +type FileChunkManifest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Chunks []*FileChunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` +} + +func (x *FileChunkManifest) Reset() { + *x = FileChunkManifest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileChunkManifest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileChunkManifest) ProtoMessage() {} + +func (x *FileChunkManifest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileChunkManifest.ProtoReflect.Descriptor instead. 
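When is_chunk_manifest is set, a chunk's stored bytes are not file data but a serialized FileChunkManifest naming the real chunks, which keeps huge files from bloating their entries. A hedged resolver sketch; fetchChunk is a hypothetical helper that downloads (and, if needed, decrypts and decompresses) a chunk's bytes:

// import proto "github.com/golang/protobuf/proto"

func resolveChunk(c *filer_pb.FileChunk, fetchChunk func(fileId string) ([]byte, error)) ([]*filer_pb.FileChunk, error) {
	if !c.GetIsChunkManifest() {
		return []*filer_pb.FileChunk{c}, nil // a plain data chunk resolves to itself
	}
	data, err := fetchChunk(c.GetFileId())
	if err != nil {
		return nil, err
	}
	m := &filer_pb.FileChunkManifest{}
	if err := proto.Unmarshal(data, m); err != nil {
		return nil, err
	}
	return m.GetChunks(), nil // these may themselves be manifests; recurse as needed
}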
+func (*FileChunkManifest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{8} +} + +func (x *FileChunkManifest) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks } return nil } type FileId struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` - Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie" json:"cookie,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"` } -func (m *FileId) Reset() { *m = FileId{} } -func (m *FileId) String() string { return proto.CompactTextString(m) } -func (*FileId) ProtoMessage() {} -func (*FileId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (x *FileId) Reset() { + *x = FileId{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileId) ProtoMessage() {} + +func (x *FileId) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileId.ProtoReflect.Descriptor instead. 
+func (*FileId) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{9} +} -func (m *FileId) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *FileId) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *FileId) GetFileKey() uint64 { - if m != nil { - return m.FileKey +func (x *FileId) GetFileKey() uint64 { + if x != nil { + return x.FileKey } return 0 } -func (m *FileId) GetCookie() uint32 { - if m != nil { - return m.Cookie +func (x *FileId) GetCookie() uint32 { + if x != nil { + return x.Cookie } return 0 } type FuseAttributes struct { - FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"` - Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"` - FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode" json:"file_mode,omitempty"` - Uid uint32 `protobuf:"varint,4,opt,name=uid" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,5,opt,name=gid" json:"gid,omitempty"` - Crtime int64 `protobuf:"varint,6,opt,name=crtime" json:"crtime,omitempty"` - Mime string `protobuf:"bytes,7,opt,name=mime" json:"mime,omitempty"` - Replication string `protobuf:"bytes,8,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,9,opt,name=collection" json:"collection,omitempty"` - TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` - UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"` - GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"` - SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"` -} - -func (m *FuseAttributes) Reset() { *m = FuseAttributes{} } -func (m *FuseAttributes) String() string { return proto.CompactTextString(m) } -func (*FuseAttributes) ProtoMessage() {} -func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *FuseAttributes) GetFileSize() uint64 { - if m != nil { - return m.FileSize + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds + FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"` + Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"` + Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds + Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"` + Replication string `protobuf:"bytes,8,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,9,opt,name=collection,proto3" json:"collection,omitempty"` + TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs + GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs + SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" 
json:"symlink_target,omitempty"` + Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *FuseAttributes) Reset() { + *x = FuseAttributes{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FuseAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FuseAttributes) ProtoMessage() {} + +func (x *FuseAttributes) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FuseAttributes.ProtoReflect.Descriptor instead. +func (*FuseAttributes) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{10} +} + +func (x *FuseAttributes) GetFileSize() uint64 { + if x != nil { + return x.FileSize } return 0 } -func (m *FuseAttributes) GetMtime() int64 { - if m != nil { - return m.Mtime +func (x *FuseAttributes) GetMtime() int64 { + if x != nil { + return x.Mtime } return 0 } -func (m *FuseAttributes) GetFileMode() uint32 { - if m != nil { - return m.FileMode +func (x *FuseAttributes) GetFileMode() uint32 { + if x != nil { + return x.FileMode } return 0 } -func (m *FuseAttributes) GetUid() uint32 { - if m != nil { - return m.Uid +func (x *FuseAttributes) GetUid() uint32 { + if x != nil { + return x.Uid } return 0 } -func (m *FuseAttributes) GetGid() uint32 { - if m != nil { - return m.Gid +func (x *FuseAttributes) GetGid() uint32 { + if x != nil { + return x.Gid } return 0 } -func (m *FuseAttributes) GetCrtime() int64 { - if m != nil { - return m.Crtime +func (x *FuseAttributes) GetCrtime() int64 { + if x != nil { + return x.Crtime } return 0 } -func (m *FuseAttributes) GetMime() string { - if m != nil { - return m.Mime +func (x *FuseAttributes) GetMime() string { + if x != nil { + return x.Mime } return "" } -func (m *FuseAttributes) GetReplication() string { - if m != nil { - return m.Replication +func (x *FuseAttributes) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *FuseAttributes) GetCollection() string { - if m != nil { - return m.Collection +func (x *FuseAttributes) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *FuseAttributes) GetTtlSec() int32 { - if m != nil { - return m.TtlSec +func (x *FuseAttributes) GetTtlSec() int32 { + if x != nil { + return x.TtlSec } return 0 } -func (m *FuseAttributes) GetUserName() string { - if m != nil { - return m.UserName +func (x *FuseAttributes) GetUserName() string { + if x != nil { + return x.UserName + } + return "" +} + +func (x *FuseAttributes) GetGroupName() []string { + if x != nil { + return x.GroupName + } + return nil +} + +func (x *FuseAttributes) GetSymlinkTarget() string { + if x != nil { + return x.SymlinkTarget } return "" } -func (m *FuseAttributes) GetGroupName() []string { - if m != nil { - return m.GroupName +func (x *FuseAttributes) GetMd5() []byte { + if x != nil { + return x.Md5 } return nil } -func (m *FuseAttributes) GetSymlinkTarget() string { - if m != nil { - return m.SymlinkTarget +func (x *FuseAttributes) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } type CreateEntryRequest 
struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` +} + +func (x *CreateEntryRequest) Reset() { + *x = CreateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } -func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) } -func (*CreateEntryRequest) ProtoMessage() {} -func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *CreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEntryRequest) ProtoMessage() {} -func (m *CreateEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead. 
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{11} +} + +func (x *CreateEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *CreateEntryRequest) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *CreateEntryRequest) GetEntry() *Entry { + if x != nil { + return x.Entry + } + return nil +} + +func (x *CreateEntryRequest) GetOExcl() bool { + if x != nil { + return x.OExcl + } + return false +} + +func (x *CreateEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} + +func (x *CreateEntryRequest) GetSignatures() []int32 { + if x != nil { + return x.Signatures } return nil } type CreateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *CreateEntryResponse) Reset() { + *x = CreateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEntryResponse) ProtoMessage() {} + +func (x *CreateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } -func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) } -func (*CreateEntryResponse) ProtoMessage() {} -func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +// Deprecated: Use CreateEntryResponse.ProtoReflect.Descriptor instead. 
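o_excl gives CreateEntry open(2)-style O_EXCL behavior (fail instead of overwriting an existing entry), and CreateEntryResponse now reports application-level failures in-band through error. A fragment (client and ctx as before):

resp, err := client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{
	Directory: "/buckets/demo",
	Entry:     &filer_pb.Entry{Name: "a.txt"},
	OExcl:     true, // refuse to clobber an existing entry
})
if err != nil {
	log.Fatal(err) // transport-level failure
}
if resp.GetError() != "" {
	log.Printf("create rejected: %s", resp.GetError()) // in-band failure, e.g. entry exists
}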
+func (*CreateEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{12} +} + +func (x *CreateEntryResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} type UpdateEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,4,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } -func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} } -func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateEntryRequest) ProtoMessage() {} -func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *UpdateEntryRequest) Reset() { + *x = UpdateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEntryRequest) ProtoMessage() {} + +func (x *UpdateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEntryRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{13} +} -func (m *UpdateEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *UpdateEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *UpdateEntryRequest) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *UpdateEntryRequest) GetEntry() *Entry { + if x != nil { + return x.Entry + } + return nil +} + +func (x *UpdateEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} + +func (x *UpdateEntryRequest) GetSignatures() []int32 { + if x != nil { + return x.Signatures } return nil } type UpdateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateEntryResponse) Reset() { + *x = UpdateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEntryResponse) ProtoMessage() {} + +func (x *UpdateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEntryResponse.ProtoReflect.Descriptor instead. +func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{14} +} + +type AppendToEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` +} + +func (x *AppendToEntryRequest) Reset() { + *x = AppendToEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendToEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} } -func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateEntryResponse) ProtoMessage() {} -func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*AppendToEntryRequest) ProtoMessage() {} + +func (x *AppendToEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendToEntryRequest.ProtoReflect.Descriptor instead. 
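AppendToEntry is the RPC behind appendable files: chunks are uploaded first, then attached to the tail of an existing entry addressed by directory plus entry_name. A fragment (newChunks is a hypothetical slice of already-uploaded *filer_pb.FileChunk):

if _, err := client.AppendToEntry(ctx, &filer_pb.AppendToEntryRequest{
	Directory: "/logs",
	EntryName: "app.log",
	Chunks:    newChunks,
}); err != nil {
	log.Fatal(err)
}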
+func (*AppendToEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{15} +} + +func (x *AppendToEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory + } + return "" +} + +func (x *AppendToEntryRequest) GetEntryName() string { + if x != nil { + return x.EntryName + } + return "" +} + +func (x *AppendToEntryRequest) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks + } + return nil +} + +type AppendToEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AppendToEntryResponse) Reset() { + *x = AppendToEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendToEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendToEntryResponse) ProtoMessage() {} + +func (x *AppendToEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendToEntryResponse.ProtoReflect.Descriptor instead. +func (*AppendToEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{16} +} type DeleteEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // bool is_directory = 3; - IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"` - IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"` - IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError" json:"ignore_recursive_error,omitempty"` + IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData,proto3" json:"is_delete_data,omitempty"` + IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive,proto3" json:"is_recursive,omitempty"` + IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,8,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } -func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} } -func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteEntryRequest) ProtoMessage() {} -func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *DeleteEntryRequest) Reset() { + *x = DeleteEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*DeleteEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *DeleteEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (*DeleteEntryRequest) ProtoMessage() {} + +func (x *DeleteEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteEntryRequest.ProtoReflect.Descriptor instead. +func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{17} +} + +func (x *DeleteEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *DeleteEntryRequest) GetName() string { - if m != nil { - return m.Name +func (x *DeleteEntryRequest) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *DeleteEntryRequest) GetIsDeleteData() bool { - if m != nil { - return m.IsDeleteData +func (x *DeleteEntryRequest) GetIsDeleteData() bool { + if x != nil { + return x.IsDeleteData + } + return false +} + +func (x *DeleteEntryRequest) GetIsRecursive() bool { + if x != nil { + return x.IsRecursive } return false } -func (m *DeleteEntryRequest) GetIsRecursive() bool { - if m != nil { - return m.IsRecursive +func (x *DeleteEntryRequest) GetIgnoreRecursiveError() bool { + if x != nil { + return x.IgnoreRecursiveError } return false } -func (m *DeleteEntryRequest) GetIgnoreRecursiveError() bool { - if m != nil { - return m.IgnoreRecursiveError +func (x *DeleteEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster } return false } +func (x *DeleteEntryRequest) GetSignatures() []int32 { + if x != nil { + return x.Signatures + } + return nil +} + type DeleteEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *DeleteEntryResponse) Reset() { + *x = DeleteEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } -func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteEntryResponse) ProtoMessage() {} -func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *DeleteEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteEntryResponse) ProtoMessage() {} + +func (x *DeleteEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteEntryResponse.ProtoReflect.Descriptor instead. 
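DeleteEntryRequest picks up the same cross-cluster flags, alongside the existing recursive-delete controls, and DeleteEntryResponse gains an in-band error. A fragment showing a recursive delete that also reclaims chunk data:

resp, err := client.DeleteEntry(ctx, &filer_pb.DeleteEntryRequest{
	Directory:            "/buckets",
	Name:                 "demo",
	IsDeleteData:         true, // also reclaim the underlying chunks
	IsRecursive:          true,
	IgnoreRecursiveError: true, // keep going past children that fail to delete
})
if err != nil {
	log.Fatal(err)
}
if resp.GetError() != "" {
	log.Printf("delete finished with error: %s", resp.GetError())
}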
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{18} +} + +func (x *DeleteEntryResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} type AtomicRenameEntryRequest struct { - OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"` - OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"` - NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory" json:"new_directory,omitempty"` - NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName" json:"new_name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` +} + +func (x *AtomicRenameEntryRequest) Reset() { + *x = AtomicRenameEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AtomicRenameEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AtomicRenameEntryRequest) ProtoMessage() {} + +func (x *AtomicRenameEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} } -func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) } -func (*AtomicRenameEntryRequest) ProtoMessage() {} -func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +// Deprecated: Use AtomicRenameEntryRequest.ProtoReflect.Descriptor instead. 
+func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{19} +} -func (m *AtomicRenameEntryRequest) GetOldDirectory() string { - if m != nil { - return m.OldDirectory +func (x *AtomicRenameEntryRequest) GetOldDirectory() string { + if x != nil { + return x.OldDirectory } return "" } -func (m *AtomicRenameEntryRequest) GetOldName() string { - if m != nil { - return m.OldName +func (x *AtomicRenameEntryRequest) GetOldName() string { + if x != nil { + return x.OldName } return "" } -func (m *AtomicRenameEntryRequest) GetNewDirectory() string { - if m != nil { - return m.NewDirectory +func (x *AtomicRenameEntryRequest) GetNewDirectory() string { + if x != nil { + return x.NewDirectory } return "" } -func (m *AtomicRenameEntryRequest) GetNewName() string { - if m != nil { - return m.NewName +func (x *AtomicRenameEntryRequest) GetNewName() string { + if x != nil { + return x.NewName } return "" } type AtomicRenameEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AtomicRenameEntryResponse) Reset() { + *x = AtomicRenameEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AtomicRenameEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} } -func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) } -func (*AtomicRenameEntryResponse) ProtoMessage() {} -func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*AtomicRenameEntryResponse) ProtoMessage() {} + +func (x *AtomicRenameEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AtomicRenameEntryResponse.ProtoReflect.Descriptor instead. 
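AtomicRenameEntry moves an entry in a single filer-side operation, so observers never see both names or neither. A fragment:

if _, err := client.AtomicRenameEntry(ctx, &filer_pb.AtomicRenameEntryRequest{
	OldDirectory: "/buckets/demo",
	OldName:      "a.txt",
	NewDirectory: "/buckets/demo/archive",
	NewName:      "a.txt",
}); err != nil {
	log.Fatal(err) // the rename either fully happened or not at all
}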
+func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{20} +} type AssignVolumeRequest struct { - Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` - TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Path string `protobuf:"bytes,6,opt,name=path,proto3" json:"path,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` + DiskType string `protobuf:"bytes,8,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *AssignVolumeRequest) Reset() { + *x = AssignVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } -func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (x *AssignVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignVolumeRequest) ProtoMessage() {} -func (m *AssignVolumeRequest) GetCount() int32 { - if m != nil { - return m.Count +func (x *AssignVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{21} +} + +func (x *AssignVolumeRequest) GetCount() int32 { + if x != nil { + return x.Count } return 0 } -func (m *AssignVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *AssignVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *AssignVolumeRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *AssignVolumeRequest) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *AssignVolumeRequest) GetTtlSec() int32 { - if m != nil { - return m.TtlSec +func (x *AssignVolumeRequest) GetTtlSec() int32 { + if x != nil { + return x.TtlSec } return 0 } -func (m *AssignVolumeRequest) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *AssignVolumeRequest) GetDataCenter() string { + if x != nil { + return x.DataCenter + } + return "" +} + +func (x *AssignVolumeRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *AssignVolumeRequest) GetRack() string { + if x != nil { + return x.Rack + } + return "" +} + +func (x *AssignVolumeRequest) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } type AssignVolumeResponse struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"` + Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"` + Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *AssignVolumeResponse) Reset() { + *x = AssignVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignVolumeResponse) ProtoMessage() {} + +func (x *AssignVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } -func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{19} } +// Deprecated: Use AssignVolumeResponse.ProtoReflect.Descriptor instead. +func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{22} +} -func (m *AssignVolumeResponse) GetFileId() string { - if m != nil { - return m.FileId +func (x *AssignVolumeResponse) GetFileId() string { + if x != nil { + return x.FileId } return "" } -func (m *AssignVolumeResponse) GetUrl() string { - if m != nil { - return m.Url +func (x *AssignVolumeResponse) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *AssignVolumeResponse) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *AssignVolumeResponse) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *AssignVolumeResponse) GetCount() int32 { - if m != nil { - return m.Count +func (x *AssignVolumeResponse) GetCount() int32 { + if x != nil { + return x.Count } return 0 } -func (m *AssignVolumeResponse) GetAuth() string { - if m != nil { - return m.Auth +func (x *AssignVolumeResponse) GetAuth() string { + if x != nil { + return x.Auth + } + return "" +} + +func (x *AssignVolumeResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *AssignVolumeResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *AssignVolumeResponse) GetError() string { + if x != nil { + return x.Error } return "" } type LookupVolumeRequest struct { - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` +} + +func (x *LookupVolumeRequest) Reset() { + *x = LookupVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } -func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*LookupVolumeRequest) ProtoMessage() {} -func (m *LookupVolumeRequest) GetVolumeIds() []string { - if m != nil { - return m.VolumeIds +func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead. 
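AssignVolumeRequest picks up Path, Rack, and DiskType, and the response now echoes Collection/Replication and carries an in-band Error field. A hedged sketch of an assignment call, assuming an already-dialed filer_pb.SeaweedFilerClient plus the context, fmt, and filer_pb imports (the collection and path values are illustrative):

// assignOne asks the filer for one writable file id.
func assignOne(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	resp, err := client.AssignVolume(ctx, &filer_pb.AssignVolumeRequest{
		Count:       1,
		Collection:  "test",
		Replication: "000",
		DiskType:    "hdd", // new in this revision, as are Path and Rack
		Path:        "/buckets/test/file.txt",
	})
	if err != nil {
		return err // transport-level failure
	}
	if resp.GetError() != "" {
		return fmt.Errorf("assign: %s", resp.GetError()) // in-band failure
	}
	fmt.Println(resp.GetFileId(), resp.GetUrl(), resp.GetPublicUrl())
	return nil
}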
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{23} +} + +func (x *LookupVolumeRequest) GetVolumeIds() []string { + if x != nil { + return x.VolumeIds } return nil } type Locations struct { - Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` } -func (m *Locations) Reset() { *m = Locations{} } -func (m *Locations) String() string { return proto.CompactTextString(m) } -func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (x *Locations) Reset() { + *x = Locations{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Locations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Locations) ProtoMessage() {} + +func (x *Locations) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Locations.ProtoReflect.Descriptor instead. +func (*Locations) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{24} +} -func (m *Locations) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *Locations) GetLocations() []*Location { + if x != nil { + return x.Locations } return nil } type Location struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` } -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Location) GetUrl() string { - if m != nil { - return m.Url +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. 
+func (*Location) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{25} +} + +func (x *Location) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *Location) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *Location) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } type LookupVolumeResponse struct { - LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *LookupVolumeResponse) Reset() { + *x = LookupVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeResponse) ProtoMessage() {} + +func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead. +func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{26} +} + +func (x *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { + if x != nil { + return x.LocationsMap + } + return nil +} + +type Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Collection) Reset() { + *x = Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Collection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collection) ProtoMessage() {} + +func (x *Collection) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collection.ProtoReflect.Descriptor instead. 
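LookupVolumeResponse still maps each volume id string to a Locations list. A sketch of resolving several volumes in one call, under the same client/ctx assumptions as above:

// lookupVolumes prints every known replica location for the given ids.
func lookupVolumes(ctx context.Context, client filer_pb.SeaweedFilerClient, ids []string) error {
	resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{VolumeIds: ids})
	if err != nil {
		return err
	}
	for vid, locs := range resp.GetLocationsMap() {
		for _, loc := range locs.GetLocations() {
			fmt.Println(vid, "->", loc.GetUrl(), loc.GetPublicUrl())
		}
	}
	return nil
}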
+func (*Collection) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{27} +} + +func (x *Collection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type CollectionListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` +} + +func (x *CollectionListRequest) Reset() { + *x = CollectionListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListRequest) ProtoMessage() {} + +func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead. +func (*CollectionListRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{28} +} + +func (x *CollectionListRequest) GetIncludeNormalVolumes() bool { + if x != nil { + return x.IncludeNormalVolumes + } + return false +} + +func (x *CollectionListRequest) GetIncludeEcVolumes() bool { + if x != nil { + return x.IncludeEcVolumes + } + return false +} + +type CollectionListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` +} + +func (x *CollectionListResponse) Reset() { + *x = CollectionListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListResponse) ProtoMessage() {} + +func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } -func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionListResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{29} +} -func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { - if m != nil { - return m.LocationsMap +func (x *CollectionListResponse) GetCollections() []*Collection { + if x != nil { + return x.Collections } return nil } type DeleteCollectionRequest struct { - Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *DeleteCollectionRequest) Reset() { + *x = DeleteCollectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCollectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionRequest) ProtoMessage() {} + +func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } -func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{30} +} -func (m *DeleteCollectionRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *DeleteCollectionRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type DeleteCollectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } -func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (x *DeleteCollectionResponse) Reset() { + *x = DeleteCollectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCollectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionResponse) ProtoMessage() {} + +func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. 
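Collection, CollectionListRequest, and CollectionListResponse are new messages in this revision, letting clients enumerate collections through the filer. A sketch under the same assumptions:

// listCollections prints every collection name the filer reports.
func listCollections(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	resp, err := client.CollectionList(ctx, &filer_pb.CollectionListRequest{
		IncludeNormalVolumes: true,
		IncludeEcVolumes:     true,
	})
	if err != nil {
		return err
	}
	for _, c := range resp.GetCollections() {
		fmt.Println(c.GetName())
	}
	return nil
}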
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{31} +} type StatisticsRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *StatisticsRequest) Reset() { + *x = StatisticsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsRequest) ProtoMessage() {} + +func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } -func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } -func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. 
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{32} +} + +func (x *StatisticsRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} -func (m *StatisticsRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *StatisticsRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *StatisticsRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *StatisticsRequest) GetTtl() string { + if x != nil { + return x.Ttl } return "" } -func (m *StatisticsRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *StatisticsRequest) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } type StatisticsResponse struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` +} + +func (x *StatisticsResponse) Reset() { + *x = StatisticsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsResponse) ProtoMessage() {} + +func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } -func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } -func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. 
+func (*StatisticsResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{33} +} + +func (x *StatisticsResponse) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *StatisticsResponse) GetUsedSize() uint64 { + if x != nil { + return x.UsedSize + } + return 0 +} + +func (x *StatisticsResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +type GetFilerConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFilerConfigurationRequest) Reset() { + *x = GetFilerConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFilerConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilerConfigurationRequest) ProtoMessage() {} + +func (x *GetFilerConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilerConfigurationRequest.ProtoReflect.Descriptor instead. +func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{34} +} + +type GetFilerConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"` + DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"` + Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"` + Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"` + MetricsAddress string `protobuf:"bytes,9,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSec int32 `protobuf:"varint,10,opt,name=metrics_interval_sec,json=metricsIntervalSec,proto3" json:"metrics_interval_sec,omitempty"` +} + +func (x *GetFilerConfigurationResponse) Reset() { + *x = GetFilerConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFilerConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilerConfigurationResponse) ProtoMessage() {} + +func (x *GetFilerConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilerConfigurationResponse.ProtoReflect.Descriptor instead. 
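StatisticsRequest gains DiskType, while StatisticsResponse drops the replication/collection/ttl echo fields and keeps only the three counters. A sketch, same assumptions:

// printUsage reports capacity for one collection/replication/disk combination.
func printUsage(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	stats, err := client.Statistics(ctx, &filer_pb.StatisticsRequest{
		Collection:  "test",
		Replication: "000",
		DiskType:    "hdd", // new request field
	})
	if err != nil {
		return err
	}
	fmt.Println(stats.GetTotalSize(), stats.GetUsedSize(), stats.GetFileCount())
	return nil
}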
+func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{35} +} -func (m *StatisticsResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *GetFilerConfigurationResponse) GetMasters() []string { + if x != nil { + return x.Masters + } + return nil +} + +func (x *GetFilerConfigurationResponse) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *StatisticsResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *GetFilerConfigurationResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetMaxMb() uint32 { + if x != nil { + return x.MaxMb + } + return 0 +} + +func (x *GetFilerConfigurationResponse) GetDirBuckets() string { + if x != nil { + return x.DirBuckets } return "" } -func (m *StatisticsResponse) GetTtl() string { - if m != nil { - return m.Ttl +func (x *GetFilerConfigurationResponse) GetCipher() bool { + if x != nil { + return x.Cipher + } + return false +} + +func (x *GetFilerConfigurationResponse) GetSignature() int32 { + if x != nil { + return x.Signature + } + return 0 +} + +func (x *GetFilerConfigurationResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress } return "" } -func (m *StatisticsResponse) GetTotalSize() uint64 { - if m != nil { - return m.TotalSize +func (x *GetFilerConfigurationResponse) GetMetricsIntervalSec() int32 { + if x != nil { + return x.MetricsIntervalSec } return 0 } -func (m *StatisticsResponse) GetUsedSize() uint64 { - if m != nil { - return m.UsedSize +type SubscribeMetadataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` + PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` + SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + Signature int32 `protobuf:"varint,4,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *SubscribeMetadataRequest) Reset() { + *x = SubscribeMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeMetadataRequest) ProtoMessage() {} + +func (x *SubscribeMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeMetadataRequest.ProtoReflect.Descriptor instead. 
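GetFilerConfigurationResponse grows from four fields to nine, adding dir_buckets, cipher, signature, and the metrics address/interval. A sketch of reading it, same assumptions:

// showFilerConfig dumps the filer's self-reported configuration.
func showFilerConfig(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	conf, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
	if err != nil {
		return err
	}
	fmt.Println(conf.GetMasters(), conf.GetMaxMb(), conf.GetDirBuckets(), conf.GetCipher())
	return nil
}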
+func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{36} +} + +func (x *SubscribeMetadataRequest) GetClientName() string { + if x != nil { + return x.ClientName + } + return "" +} + +func (x *SubscribeMetadataRequest) GetPathPrefix() string { + if x != nil { + return x.PathPrefix + } + return "" +} + +func (x *SubscribeMetadataRequest) GetSinceNs() int64 { + if x != nil { + return x.SinceNs } return 0 } -func (m *StatisticsResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *SubscribeMetadataRequest) GetSignature() int32 { + if x != nil { + return x.Signature } return 0 } -type GetFilerConfigurationRequest struct { +type SubscribeMetadataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` + TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` +} + +func (x *SubscribeMetadataResponse) Reset() { + *x = SubscribeMetadataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} } -func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) } -func (*GetFilerConfigurationRequest) ProtoMessage() {} -func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (x *SubscribeMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} -type GetFilerConfigurationResponse struct { - Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` +func (*SubscribeMetadataResponse) ProtoMessage() {} + +func (x *SubscribeMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeMetadataResponse.ProtoReflect.Descriptor instead. 
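SubscribeMetadataRequest/Response back the filer's metadata-change feed; the RPC is server-streaming, so a consumer loops on Recv. A hedged sketch, same assumptions (the prefix and client name are illustrative):

// tailMetadata follows change events under a path prefix until the stream ends.
func tailMetadata(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
		ClientName: "example-subscriber",
		PathPrefix: "/buckets/",
		SinceNs:    0, // a past timestamp would replay older events
	})
	if err != nil {
		return err
	}
	for {
		event, err := stream.Recv()
		if err != nil {
			return err // io.EOF once the server closes the stream
		}
		fmt.Println(event.GetTsNs(), event.GetDirectory(), event.GetEventNotification())
	}
}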
+func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{37} +} + +func (x *SubscribeMetadataResponse) GetDirectory() string { + if x != nil { + return x.Directory + } + return "" +} + +func (x *SubscribeMetadataResponse) GetEventNotification() *EventNotification { + if x != nil { + return x.EventNotification + } + return nil +} + +func (x *SubscribeMetadataResponse) GetTsNs() int64 { + if x != nil { + return x.TsNs + } + return 0 +} + +type LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` + PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *LogEntry) Reset() { + *x = LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogEntry) ProtoMessage() {} + +func (x *LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. +func (*LogEntry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{38} +} + +func (x *LogEntry) GetTsNs() int64 { + if x != nil { + return x.TsNs + } + return 0 +} + +func (x *LogEntry) GetPartitionKeyHash() int32 { + if x != nil { + return x.PartitionKeyHash + } + return 0 +} + +func (x *LogEntry) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type KeepConnectedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` + Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *KeepConnectedRequest) Reset() { + *x = KeepConnectedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedRequest) ProtoMessage() {} + +func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead. 
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{39} +} + +func (x *KeepConnectedRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *KeepConnectedRequest) GetGrpcPort() uint32 { + if x != nil { + return x.GrpcPort + } + return 0 +} + +func (x *KeepConnectedRequest) GetResources() []string { + if x != nil { + return x.Resources + } + return nil +} + +type KeepConnectedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *KeepConnectedResponse) Reset() { + *x = KeepConnectedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedResponse) ProtoMessage() {} + +func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead. +func (*KeepConnectedResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{40} +} + +type LocateBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *LocateBrokerRequest) Reset() { + *x = LocateBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerRequest) ProtoMessage() {} + +func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead. 
+func (*LocateBrokerRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{41} +} + +func (x *LocateBrokerRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +type LocateBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"` + Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *LocateBrokerResponse) Reset() { + *x = LocateBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerResponse) ProtoMessage() {} + +func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerResponse.ProtoReflect.Descriptor instead. +func (*LocateBrokerResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{42} +} + +func (x *LocateBrokerResponse) GetFound() bool { + if x != nil { + return x.Found + } + return false +} + +func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource { + if x != nil { + return x.Resources + } + return nil +} + +// Key-Value operations +type KvGetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *KvGetRequest) Reset() { + *x = KvGetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvGetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvGetRequest) ProtoMessage() {} + +func (x *KvGetRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvGetRequest.ProtoReflect.Descriptor instead. 
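LocateBrokerResponse pairs a Found flag with a Resources list of nested Resource messages. A sketch of interpreting the two cases, same assumptions (the resource name is illustrative):

// findBroker reports either the exact broker or the candidates for a resource.
func findBroker(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	resp, err := client.LocateBroker(ctx, &filer_pb.LocateBrokerRequest{Resource: "example-topic"})
	if err != nil {
		return err
	}
	if resp.GetFound() {
		fmt.Println("exact broker known")
	}
	for _, r := range resp.GetResources() {
		fmt.Println(r.GetGrpcAddresses(), r.GetResourceCount())
	}
	return nil
}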
+func (*KvGetRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{43} +} + +func (x *KvGetRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +type KvGetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *KvGetResponse) Reset() { + *x = KvGetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvGetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvGetResponse) ProtoMessage() {} + +func (x *KvGetResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } -func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetFilerConfigurationResponse) ProtoMessage() {} -func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +// Deprecated: Use KvGetResponse.ProtoReflect.Descriptor instead. +func (*KvGetResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{44} +} -func (m *GetFilerConfigurationResponse) GetMasters() []string { - if m != nil { - return m.Masters +func (x *KvGetResponse) GetValue() []byte { + if x != nil { + return x.Value } return nil } -func (m *GetFilerConfigurationResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *KvGetResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type KvPutRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KvPutRequest) Reset() { + *x = KvPutRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvPutRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvPutRequest) ProtoMessage() {} + +func (x *KvPutRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvPutRequest.ProtoReflect.Descriptor instead. 
+func (*KvPutRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{45} +} + +func (x *KvPutRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *KvPutRequest) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type KvPutResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *KvPutResponse) Reset() { + *x = KvPutResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvPutResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvPutResponse) ProtoMessage() {} + +func (x *KvPutResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvPutResponse.ProtoReflect.Descriptor instead. +func (*KvPutResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{46} +} + +func (x *KvPutResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// path-based configurations +type FilerConf struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Locations []*FilerConf_PathConf `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` +} + +func (x *FilerConf) Reset() { + *x = FilerConf{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilerConf) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilerConf) ProtoMessage() {} + +func (x *FilerConf) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilerConf.ProtoReflect.Descriptor instead. 
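The new KvGet/KvPut messages expose a small key-value facility; both responses report failures in-band through an Error string, in addition to any transport error. A sketch, same assumptions:

// roundTripKv writes one key and reads it back.
func roundTripKv(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	put, err := client.KvPut(ctx, &filer_pb.KvPutRequest{
		Key:   []byte("greeting"),
		Value: []byte("hello"),
	})
	if err != nil {
		return err
	}
	if put.GetError() != "" {
		return fmt.Errorf("kv put: %s", put.GetError())
	}
	got, err := client.KvGet(ctx, &filer_pb.KvGetRequest{Key: []byte("greeting")})
	if err != nil {
		return err
	}
	if got.GetError() != "" {
		return fmt.Errorf("kv get: %s", got.GetError())
	}
	fmt.Println(string(got.GetValue()))
	return nil
}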
+func (*FilerConf) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{47} +} + +func (x *FilerConf) GetVersion() int32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *FilerConf) GetLocations() []*FilerConf_PathConf { + if x != nil { + return x.Locations + } + return nil +} + +// if found, send the exact address +// if not found, send the full list of existing brokers +type LocateBrokerResponse_Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` + ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" json:"resource_count,omitempty"` +} + +func (x *LocateBrokerResponse_Resource) Reset() { + *x = LocateBrokerResponse_Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerResponse_Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerResponse_Resource) ProtoMessage() {} + +func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead. +func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{42, 0} +} + +func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string { + if x != nil { + return x.GrpcAddresses + } + return "" +} + +func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 { + if x != nil { + return x.ResourceCount + } + return 0 +} + +type FilerConf_PathConf struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,5,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` + Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"` + VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"` +} + +func (x *FilerConf_PathConf) Reset() { + *x = FilerConf_PathConf{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilerConf_PathConf) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilerConf_PathConf) ProtoMessage() {} + +func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + 
} + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilerConf_PathConf.ProtoReflect.Descriptor instead. +func (*FilerConf_PathConf) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{47, 0} +} + +func (x *FilerConf_PathConf) GetLocationPrefix() string { + if x != nil { + return x.LocationPrefix + } + return "" +} + +func (x *FilerConf_PathConf) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *FilerConf_PathConf) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *FilerConf_PathConf) GetTtl() string { + if x != nil { + return x.Ttl } return "" } -func (m *GetFilerConfigurationResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *FilerConf_PathConf) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } -func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 { - if m != nil { - return m.MaxMb +func (x *FilerConf_PathConf) GetFsync() bool { + if x != nil { + return x.Fsync + } + return false +} + +func (x *FilerConf_PathConf) GetVolumeGrowthCount() uint32 { + if x != nil { + return x.VolumeGrowthCount } return 0 } -func init() { - proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") - proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") - proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest") - proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse") - proto.RegisterType((*Entry)(nil), "filer_pb.Entry") - proto.RegisterType((*FullEntry)(nil), "filer_pb.FullEntry") - proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification") - proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk") - proto.RegisterType((*FileId)(nil), "filer_pb.FileId") - proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes") - proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest") - proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse") - proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest") - proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse") - proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest") - proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse") - proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest") - proto.RegisterType((*AtomicRenameEntryResponse)(nil), "filer_pb.AtomicRenameEntryResponse") - proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest") - proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse") - proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest") - proto.RegisterType((*Locations)(nil), "filer_pb.Locations") - proto.RegisterType((*Location)(nil), "filer_pb.Location") - proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse") - proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest") - proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse") - proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest") - proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse") - proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest") - 
proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse") +var File_filer_proto protoreflect.FileDescriptor + +var file_filer_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x1b, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x45, 0x0a, 0x1c, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, + 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x11, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x3c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x85, + 0x03, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x75, 0x73, 0x65, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, + 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x69, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e, + 0x6b, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, + 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, + 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x09, 0x46, 0x75, 0x6c, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x8f, 0x02, 0x0a, + 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x6f, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x68, 0x75, + 0x6e, 0x6b, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x77, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, 0x15, 0x69, + 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, + 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, + 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xe6, + 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x49, 0x64, 0x12, 0x22, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, + 0x64, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x66, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x09, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, + 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x69, + 0x73, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x06, + 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x58, 0x0a, 0x06, 0x46, 0x69, 0x6c, + 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x22, 0x9d, 0x03, 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 
0x6c, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x69, + 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x63, 0x72, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, + 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, + 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, + 0x0e, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6d, 0x64, 0x35, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, + 0x79, 0x70, 0x65, 0x22, 0xc3, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x15, 0x0a, 0x06, 0x6f, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x05, 0x6f, 0x45, 0x78, 0x63, 0x6c, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, + 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, + 
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xac, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, + 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, + 0x17, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x02, 0x0a, 0x12, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 
0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, + 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, + 0x19, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x13, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, + 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, + 0x53, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 
0x65, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x34, + 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x73, 0x22, 0x3d, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x30, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x3b, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, + 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, + 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 
0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x20, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x50, 0x0a, 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x36, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84, + 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, + 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, + 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x02, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, + 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64, + 0x69, 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x64, 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, + 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x95, 0x01, + 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, + 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, + 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13, + 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, + 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, + 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15, + 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x05, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x4b, 0x76, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x4b, 0x76, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xce, 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, + 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x52, + 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x08, 0x50, + 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, + 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x32, 0xdc, 0x0c, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, + 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 
0x6e, 0x74, 0x72, 0x79, + 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 
0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, + 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, + 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, 0x0a, + 0x0c, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, + 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, + 0x0a, 0x05, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, + 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, + 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, + 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_filer_proto_rawDescOnce sync.Once + file_filer_proto_rawDescData = file_filer_proto_rawDesc +) + +func file_filer_proto_rawDescGZIP() []byte { + file_filer_proto_rawDescOnce.Do(func() { + file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(file_filer_proto_rawDescData) + }) + return file_filer_proto_rawDescData +} + +var 
file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 52) +var file_filer_proto_goTypes = []interface{}{ + (*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest + (*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse + (*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest + (*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse + (*Entry)(nil), // 4: filer_pb.Entry + (*FullEntry)(nil), // 5: filer_pb.FullEntry + (*EventNotification)(nil), // 6: filer_pb.EventNotification + (*FileChunk)(nil), // 7: filer_pb.FileChunk + (*FileChunkManifest)(nil), // 8: filer_pb.FileChunkManifest + (*FileId)(nil), // 9: filer_pb.FileId + (*FuseAttributes)(nil), // 10: filer_pb.FuseAttributes + (*CreateEntryRequest)(nil), // 11: filer_pb.CreateEntryRequest + (*CreateEntryResponse)(nil), // 12: filer_pb.CreateEntryResponse + (*UpdateEntryRequest)(nil), // 13: filer_pb.UpdateEntryRequest + (*UpdateEntryResponse)(nil), // 14: filer_pb.UpdateEntryResponse + (*AppendToEntryRequest)(nil), // 15: filer_pb.AppendToEntryRequest + (*AppendToEntryResponse)(nil), // 16: filer_pb.AppendToEntryResponse + (*DeleteEntryRequest)(nil), // 17: filer_pb.DeleteEntryRequest + (*DeleteEntryResponse)(nil), // 18: filer_pb.DeleteEntryResponse + (*AtomicRenameEntryRequest)(nil), // 19: filer_pb.AtomicRenameEntryRequest + (*AtomicRenameEntryResponse)(nil), // 20: filer_pb.AtomicRenameEntryResponse + (*AssignVolumeRequest)(nil), // 21: filer_pb.AssignVolumeRequest + (*AssignVolumeResponse)(nil), // 22: filer_pb.AssignVolumeResponse + (*LookupVolumeRequest)(nil), // 23: filer_pb.LookupVolumeRequest + (*Locations)(nil), // 24: filer_pb.Locations + (*Location)(nil), // 25: filer_pb.Location + (*LookupVolumeResponse)(nil), // 26: filer_pb.LookupVolumeResponse + (*Collection)(nil), // 27: filer_pb.Collection + (*CollectionListRequest)(nil), // 28: filer_pb.CollectionListRequest + (*CollectionListResponse)(nil), // 29: filer_pb.CollectionListResponse + (*DeleteCollectionRequest)(nil), // 30: filer_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 31: filer_pb.DeleteCollectionResponse + (*StatisticsRequest)(nil), // 32: filer_pb.StatisticsRequest + (*StatisticsResponse)(nil), // 33: filer_pb.StatisticsResponse + (*GetFilerConfigurationRequest)(nil), // 34: filer_pb.GetFilerConfigurationRequest + (*GetFilerConfigurationResponse)(nil), // 35: filer_pb.GetFilerConfigurationResponse + (*SubscribeMetadataRequest)(nil), // 36: filer_pb.SubscribeMetadataRequest + (*SubscribeMetadataResponse)(nil), // 37: filer_pb.SubscribeMetadataResponse + (*LogEntry)(nil), // 38: filer_pb.LogEntry + (*KeepConnectedRequest)(nil), // 39: filer_pb.KeepConnectedRequest + (*KeepConnectedResponse)(nil), // 40: filer_pb.KeepConnectedResponse + (*LocateBrokerRequest)(nil), // 41: filer_pb.LocateBrokerRequest + (*LocateBrokerResponse)(nil), // 42: filer_pb.LocateBrokerResponse + (*KvGetRequest)(nil), // 43: filer_pb.KvGetRequest + (*KvGetResponse)(nil), // 44: filer_pb.KvGetResponse + (*KvPutRequest)(nil), // 45: filer_pb.KvPutRequest + (*KvPutResponse)(nil), // 46: filer_pb.KvPutResponse + (*FilerConf)(nil), // 47: filer_pb.FilerConf + nil, // 48: filer_pb.Entry.ExtendedEntry + nil, // 49: filer_pb.LookupVolumeResponse.LocationsMapEntry + (*LocateBrokerResponse_Resource)(nil), // 50: filer_pb.LocateBrokerResponse.Resource + (*FilerConf_PathConf)(nil), // 51: filer_pb.FilerConf.PathConf +} +var file_filer_proto_depIdxs = []int32{ + 4, // 0: 
filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry + 4, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry + 7, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk + 10, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes + 48, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry + 4, // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry + 4, // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry + 4, // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry + 9, // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId + 9, // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId + 7, // 10: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk + 4, // 11: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry + 4, // 12: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry + 7, // 13: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk + 25, // 14: filer_pb.Locations.locations:type_name -> filer_pb.Location + 49, // 15: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry + 27, // 16: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection + 6, // 17: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification + 50, // 18: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource + 51, // 19: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf + 24, // 20: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations + 0, // 21: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest + 2, // 22: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest + 11, // 23: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest + 13, // 24: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest + 15, // 25: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest + 17, // 26: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest + 19, // 27: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest + 21, // 28: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest + 23, // 29: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest + 28, // 30: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest + 30, // 31: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest + 32, // 32: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest + 34, // 33: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest + 36, // 34: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest + 36, // 35: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest + 39, // 36: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest + 41, // 37: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest + 43, // 38: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest + 45, // 39: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest + 1, // 40: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> 
filer_pb.LookupDirectoryEntryResponse + 3, // 41: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse + 12, // 42: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse + 14, // 43: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse + 16, // 44: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse + 18, // 45: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse + 20, // 46: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse + 22, // 47: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse + 26, // 48: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse + 29, // 49: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse + 31, // 50: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse + 33, // 51: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse + 35, // 52: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse + 37, // 53: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 37, // 54: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 40, // 55: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse + 42, // 56: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse + 44, // 57: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse + 46, // 58: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse + 40, // [40:59] is the sub-list for method output_type + 21, // [21:40] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_filer_proto_init() } +func file_filer_proto_init() { + if File_filer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*FullEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunkManifest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FuseAttributes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*AtomicRenameEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Locations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvGetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvGetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvPutRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvPutResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[47].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*FilerConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse_Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilerConf_PathConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_filer_proto_rawDesc, + NumEnums: 0, + NumMessages: 52, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_filer_proto_goTypes, + DependencyIndexes: file_filer_proto_depIdxs, + MessageInfos: file_filer_proto_msgTypes, + }.Build() + File_filer_proto = out.File + file_filer_proto_rawDesc = nil + file_filer_proto_goTypes = nil + file_filer_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for SeaweedFiler service +const _ = grpc.SupportPackageIsVersion6 +// SeaweedFilerClient is the client API for SeaweedFiler service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
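A quick usage sketch for the regenerated client API defined just below (illustrative only, not part of this patch). The filer address, directory, and file name are hypothetical; by SeaweedFS convention the filer gRPC port is the HTTP port plus 10000, so a default filer on 8888 serves gRPC on 18888. Error handling is reduced to log.Fatal and the connection uses the insecure transport for brevity:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Hypothetical filer gRPC address.
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Unary call: look up a single directory entry (path and name are hypothetical).
	resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
		Directory: "/buckets/demo",
		Name:      "hello.txt",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("found entry, isDirectory =", resp.Entry.IsDirectory)
}

The same *grpc.ClientConn can back every RPC in the interface, including the server-streaming and bidirectional methods this patch adds.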
type SeaweedFilerClient interface { LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) + AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) + CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) + SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) + SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) + KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) + LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) + KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) + KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) } type seaweedFilerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSeaweedFilerClient(cc *grpc.ClientConn) SeaweedFilerClient { +func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient { return &seaweedFilerClient{cc} } func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) { out := new(LookupDirectoryEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, opts...) if err != nil { return nil, err } @@ -1066,7 +4537,7 @@ func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *Looku } func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], c.cc, "/filer_pb.SeaweedFiler/ListEntries", opts...) + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], "/filer_pb.SeaweedFiler/ListEntries", opts...) 
if err != nil { return nil, err } @@ -1099,7 +4570,7 @@ func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) { func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) { out := new(CreateEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, opts...) if err != nil { return nil, err } @@ -1108,7 +4579,16 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) { out := new(UpdateEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) { + out := new(AppendToEntryResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, opts...) if err != nil { return nil, err } @@ -1117,7 +4597,7 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) { out := new(DeleteEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, opts...) if err != nil { return nil, err } @@ -1126,7 +4606,7 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { out := new(AtomicRenameEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, opts...) if err != nil { return nil, err } @@ -1135,7 +4615,7 @@ func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRe func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { out := new(AssignVolumeResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, opts...) if err != nil { return nil, err } @@ -1144,7 +4624,16 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { out := new(LookupVolumeResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { + out := new(CollectionListResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CollectionList", in, out, opts...) if err != nil { return nil, err } @@ -1153,7 +4642,7 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { out := new(DeleteCollectionResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -1162,7 +4651,7 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { out := new(StatisticsResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, opts...) if err != nil { return nil, err } @@ -1171,27 +4660,218 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) { out := new(GetFilerConfigurationResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for SeaweedFiler service +func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerSubscribeMetadataClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_SubscribeMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} + +type seaweedFilerSubscribeMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} +func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[2], "/filer_pb.SeaweedFiler/SubscribeLocalMetadata", opts...) 
+ if err != nil { + return nil, err + } + x := &seaweedFilerSubscribeLocalMetadataClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_SubscribeLocalMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} + +type seaweedFilerSubscribeLocalMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[3], "/filer_pb.SeaweedFiler/KeepConnected", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerKeepConnectedClient{stream} + return x, nil +} + +type SeaweedFiler_KeepConnectedClient interface { + Send(*KeepConnectedRequest) error + Recv() (*KeepConnectedResponse, error) + grpc.ClientStream +} + +type seaweedFilerKeepConnectedClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) { + m := new(KeepConnectedResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) { + out := new(LocateBrokerResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) { + out := new(KvGetResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvGet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) { + out := new(KvPutResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvPut", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SeaweedFilerServer is the server API for SeaweedFiler service. 
type SeaweedFilerServer interface { LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) + AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) + CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) + SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error + SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error + KeepConnected(SeaweedFiler_KeepConnectedServer) error + LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) + KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) + KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) +} + +// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations. +type UnimplementedSeaweedFilerServer struct { +} + +func (*UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error { + return status.Errorf(codes.Unimplemented, "method ListEntries not implemented") +} +func (*UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppendToEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AssignVolume not implemented") +} 
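The UnimplementedSeaweedFilerServer stubs here (the list continues below) are the forward-compatibility pattern of newer protoc-gen-go output: a server type embeds the struct, overrides only the RPCs it cares about, and any method added to the service later returns codes.Unimplemented instead of breaking the build. A minimal sketch, with the hypothetical type demoFiler answering only KvGet:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// demoFiler is a hypothetical server that only answers KvGet; every other
// RPC falls through to the embedded stubs and returns codes.Unimplemented.
type demoFiler struct {
	filer_pb.UnimplementedSeaweedFilerServer
	kv map[string][]byte
}

func (s *demoFiler) KvGet(ctx context.Context, req *filer_pb.KvGetRequest) (*filer_pb.KvGetResponse, error) {
	return &filer_pb.KvGetResponse{Value: s.kv[string(req.Key)]}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":18888") // hypothetical port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	filer_pb.RegisterSeaweedFilerServer(srv, &demoFiler{kv: map[string][]byte{}})
	log.Fatal(srv.Serve(lis))
}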
+func (*UnimplementedSeaweedFilerServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented") +} +func (*UnimplementedSeaweedFilerServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented") +} +func (*UnimplementedSeaweedFilerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented") +} +func (*UnimplementedSeaweedFilerServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") +} +func (*UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented") +} +func (*UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented") +} +func (*UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented") +} +func (*UnimplementedSeaweedFilerServer) KeepConnected(SeaweedFiler_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") +} +func (*UnimplementedSeaweedFilerServer) LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocateBroker not implemented") +} +func (*UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method KvGet not implemented") +} +func (*UnimplementedSeaweedFilerServer) KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method KvPut not implemented") } func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) { @@ -1273,6 +4953,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AppendToEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).AppendToEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteEntryRequest) if err := dec(in); err != nil { @@ -1345,6 +5043,24 @@ func 
_SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).CollectionList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/CollectionList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).CollectionList(ctx, req.(*CollectionListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteCollectionRequest) if err := dec(in); err != nil { @@ -1399,6 +5115,128 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeMetadataRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream}) +} + +type SeaweedFiler_SubscribeMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeMetadataRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream}) +} + +type SeaweedFiler_SubscribeLocalMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeLocalMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream}) +} + +type SeaweedFiler_KeepConnectedServer interface { + Send(*KeepConnectedResponse) error + Recv() (*KeepConnectedRequest, error) + grpc.ServerStream +} + +type seaweedFilerKeepConnectedServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) { + m := new(KeepConnectedRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LocateBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SeaweedFilerServer).LocateBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/LocateBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedFiler_KvGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KvGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).KvGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/KvGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).KvGet(ctx, req.(*KvGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedFiler_KvPut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KvPutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).KvPut(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/KvPut", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).KvPut(ctx, req.(*KvPutRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ ServiceName: "filer_pb.SeaweedFiler", HandlerType: (*SeaweedFilerServer)(nil), @@ -1416,6 +5254,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_UpdateEntry_Handler, }, { + MethodName: "AppendToEntry", + Handler: _SeaweedFiler_AppendToEntry_Handler, + }, + { MethodName: "DeleteEntry", Handler: _SeaweedFiler_DeleteEntry_Handler, }, @@ -1432,6 +5274,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_LookupVolume_Handler, }, { + MethodName: "CollectionList", + Handler: _SeaweedFiler_CollectionList_Handler, + }, + { MethodName: "DeleteCollection", Handler: _SeaweedFiler_DeleteCollection_Handler, }, @@ -1443,6 +5289,18 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "GetFilerConfiguration", Handler: _SeaweedFiler_GetFilerConfiguration_Handler, }, + { + MethodName: "LocateBroker", + Handler: _SeaweedFiler_LocateBroker_Handler, + }, + { + MethodName: "KvGet", + Handler: _SeaweedFiler_KvGet_Handler, + }, + { + MethodName: "KvPut", + Handler: _SeaweedFiler_KvPut_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1450,113 +5308,22 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_ListEntries_Handler, ServerStreams: true, }, + { + StreamName: "SubscribeMetadata", + Handler: _SeaweedFiler_SubscribeMetadata_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeLocalMetadata", + Handler: _SeaweedFiler_SubscribeLocalMetadata_Handler, + ServerStreams: true, + }, + { + StreamName: "KeepConnected", + Handler: _SeaweedFiler_KeepConnected_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "filer.proto", } - -func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1603 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x6f, 0xdc, 
- ... (the remaining rows of this 1603-byte gzipped fileDescriptor0 table are elided) ... -} diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go new file mode 100644 index 000000000..65bd85c84 --- /dev/null +++ b/weed/pb/filer_pb/filer_client.go @@ -0,0 +1,299 @@ +package filer_pb + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "os" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + OS_UID = uint32(os.Getuid()) + OS_GID = uint32(os.Getgid()) +) + +type FilerClient interface { + WithFilerClient(fn func(SeaweedFilerClient) error) error + AdjustedUrl(location *Location) string +} + +func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) { + + dir, name := fullFilePath.DirAndName() + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + } + + // 
glog.V(3).Infof("read %s request: %v", fullFilePath, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + return nil + } + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) + return err + } + + if resp.Entry == nil { + // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + return nil + } + + entry = resp.Entry + return nil + }) + + return +} + +type EachEntryFunciton func(entry *Entry, isLast bool) error + +func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton) (err error) { + + var counter uint32 + var startFrom string + var counterFunc = func(entry *Entry, isLast bool) error { + counter++ + startFrom = entry.Name + return fn(entry, isLast) + } + + var paginationLimit uint32 = 10000 + + if err = doList(filerClient, fullDirPath, prefix, counterFunc, "", false, paginationLimit); err != nil { + return err + } + + for counter == paginationLimit { + counter = 0 + if err = doList(filerClient, fullDirPath, prefix, counterFunc, startFrom, false, paginationLimit); err != nil { + return err + } + } + + return nil +} + +func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) + }) +} + +func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + return doSeaweedList(client, fullDirPath, prefix, fn, startFrom, inclusive, limit) + }) +} + +func SeaweedList(client SeaweedFilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) +} + +func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + // Redundancy limit to make it correctly judge whether it is the last file. 
+ redLimit := limit + if limit != math.MaxInt32 && limit != 0 { + redLimit = limit + 1 + } + request := &ListEntriesRequest{ + Directory: string(fullDirPath), + Prefix: prefix, + StartFromFileName: startFrom, + Limit: redLimit, + InclusiveStartFrom: inclusive, + } + + glog.V(4).Infof("read directory: %v", request) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.ListEntries(ctx, request) + if err != nil { + return fmt.Errorf("list %s: %v", fullDirPath, err) + } + + var prevEntry *Entry + count := 0 + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + if prevEntry != nil { + if err := fn(prevEntry, true); err != nil { + return err + } + } + break + } else { + return recvErr + } + } + if prevEntry != nil { + if err := fn(prevEntry, false); err != nil { + return err + } + } + prevEntry = resp.Entry + count++ + if count > int(limit) && limit != 0 { + prevEntry = nil + } + } + + return nil +} + +func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + } + + glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + exists = false + return nil + } + glog.V(0).Infof("exists entry %v: %v", request, err) + return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + exists = resp.Entry.IsDirectory == isDirectory + + return nil + }) + + return +} + +func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string, entry *Entry) (err error) { + + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request) + if err := UpdateEntry(client, request); err != nil { + glog.V(0).Infof("touch exists entry %v: %v", request, err) + return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + return nil + }) + +} + +func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: dirName, + IsDirectory: true, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Uid: OS_UID, + Gid: OS_GID, + }, + } + + if fn != nil { + fn(entry) + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("mkdir: %v", request) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %v: %v", request, err) + return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) + } + + return nil + }) +} + +func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: fileName, + IsDirectory: false, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0770), + Uid: OS_UID, + Gid: OS_GID, + }, + Chunks: chunks, + } + + 
request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("create file %v:%v", request, err) + return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) + } + + return nil + }) +} + +func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool, signatures []int32) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + deleteEntryRequest := &DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: name, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + IgnoreRecursiveError: ignoreRecursiveErr, + IsFromOtherCluster: isFromOtherCluster, + Signatures: signatures, + } + if resp, err := client.DeleteEntry(context.Background(), deleteEntryRequest); err != nil { + if strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil + } + return err + } else { + if resp.Error != "" { + if strings.Contains(resp.Error, ErrNotFound.Error()) { + return nil + } + return errors.New(resp.Error) + } + } + + return nil + + }) +} diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go new file mode 100644 index 000000000..4e5b65f12 --- /dev/null +++ b/weed/pb/filer_pb/filer_client_bfs.go @@ -0,0 +1,63 @@ +package filer_pb + +import ( + "fmt" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + + K := 5 + + var jobQueueWg sync.WaitGroup + queue := util.NewQueue() + jobQueueWg.Add(1) + queue.Enqueue(parentPath) + var isTerminating bool + + for i := 0; i < K; i++ { + go func() { + for { + if isTerminating { + break + } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(util.FullPath) + processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr + } + jobQueueWg.Done() + } + }() + } + jobQueueWg.Wait() + isTerminating = true + return +} + +func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + + return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error { + + fn(parentPath, entry) + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name + } + jobQueueWg.Add(1) + queue.Enqueue(util.FullPath(subDir)) + } + return nil + }) + +} diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 5c40332e6..b46385c8f 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -1,10 +1,18 @@ package filer_pb import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/golang/protobuf/proto" + "github.com/viant/ptrie" ) -func toFileIdObject(fileIdStr string) (*FileId, error) { +func ToFileIdObject(fileIdStr string) (*FileId, error) { t, err := needle.ParseFileIdFromString(fileIdStr) if err != nil { return nil, err @@ -37,14 +45,14 @@ func BeforeEntrySerialization(chunks []*FileChunk) { for _, chunk := range 
chunks { if chunk.FileId != "" { - if fid, err := toFileIdObject(chunk.FileId); err == nil { + if fid, err := ToFileIdObject(chunk.FileId); err == nil { chunk.Fid = fid chunk.FileId = "" } } if chunk.SourceFileId != "" { - if fid, err := toFileIdObject(chunk.SourceFileId); err == nil { + if fid, err := ToFileIdObject(chunk.SourceFileId); err == nil { chunk.SourceFid = fid chunk.SourceFileId = "" } @@ -53,6 +61,15 @@ func BeforeEntrySerialization(chunks []*FileChunk) { } } +func EnsureFid(chunk *FileChunk) { + if chunk.Fid != nil { + return + } + if fid, err := ToFileIdObject(chunk.FileId); err == nil { + chunk.Fid = fid + } +} + func AfterEntryDeserialization(chunks []*FileChunk) { for _, chunk := range chunks { @@ -67,3 +84,66 @@ func AfterEntryDeserialization(chunks []*FileChunk) { } } + +func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { + resp, err := client.CreateEntry(context.Background(), request) + if err != nil { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + return fmt.Errorf("CreateEntry: %v", err) + } + if resp.Error != "" { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) + return fmt.Errorf("CreateEntry : %v", resp.Error) + } + return nil +} + +func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error { + _, err := client.UpdateEntry(context.Background(), request) + if err != nil { + glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err) + return fmt.Errorf("UpdateEntry: %v", err) + } + return nil +} + +func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + resp, err := client.LookupDirectoryEntry(context.Background(), request) + if err != nil { + if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil, ErrNotFound + } + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) + return nil, fmt.Errorf("LookupEntry1: %v", err) + } + if resp.Entry == nil { + return nil, ErrNotFound + } + return resp, nil +} + +var ErrNotFound = errors.New("filer: no entry is found in filer store") + +func IsCreate(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry == nil +} +func IsUpdate(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry != nil && + event.EventNotification.OldEntry != nil && + event.Directory == event.EventNotification.NewParentPath +} +func IsDelete(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry == nil && event.EventNotification.OldEntry != nil +} +func IsRename(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry != nil && + event.EventNotification.OldEntry != nil && + event.Directory != event.EventNotification.NewParentPath +} + +var _ = ptrie.KeyProvider(&FilerConf_PathConf{}) + +func (fp *FilerConf_PathConf) Key() interface{} { + key, _ := proto.Marshal(fp) + return string(key) +} diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go index d4468c011..0009afdbe 100644 --- a/weed/pb/filer_pb/filer_pb_helper_test.go +++ b/weed/pb/filer_pb/filer_pb_helper_test.go @@ -9,7 +9,7 @@ import ( func TestFileIdSize(t *testing.T) { fileIdStr := "11745,0293434534cbb9892b" - fid, _ := toFileIdObject(fileIdStr) + fid, _ := ToFileIdObject(fileIdStr) bytes, 
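// Two hedged notes on the filer_pb_helper.go additions above. LookupEntry
// normalizes "not found" into the sentinel ErrNotFound, so callers can compare
// directly against the sentinel instead of string-matching error text. And the
// four event predicates classify a *SubscribeMetadataResponse; a subscriber
// loop might branch like this (sketch, assuming `resp` comes from a
// SubscribeMetadata stream):
//
//	switch {
//	case IsCreate(resp):
//		// only NewEntry is set
//	case IsDelete(resp):
//		// only OldEntry is set
//	case IsUpdate(resp):
//		// both entries set, entry stayed in the same directory
//	case IsRename(resp):
//		// both entries set, NewParentPath differs from the event's Directory
//	}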
_ := proto.Marshal(fid) println(len(fileIdStr)) diff --git a/weed/pb/filer_pb/signature.go b/weed/pb/filer_pb/signature.go new file mode 100644 index 000000000..e13afc656 --- /dev/null +++ b/weed/pb/filer_pb/signature.go @@ -0,0 +1,13 @@ +package filer_pb + +func (r *CreateEntryRequest) AddSignature(sig int32) { + r.Signatures = append(r.Signatures, sig) +} +func (r *CreateEntryRequest) HasSigned(sig int32) bool { + for _, s := range r.Signatures { + if s == sig { + return true + } + } + return false +} diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go new file mode 100644 index 000000000..9efcd9bdc --- /dev/null +++ b/weed/pb/grpc_client_server.go @@ -0,0 +1,204 @@ +package pb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +const ( + Max_Message_Size = 1 << 30 // 1 GB +) + +var ( + // cache grpc connections + grpcClients = make(map[string]*grpc.ClientConn) + grpcClientsLock sync.Mutex +) + +func init() { + http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024 + http.DefaultTransport.(*http.Transport).MaxIdleConns = 1024 +} + +func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { + var options []grpc.ServerOption + options = append(options, + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 10 * time.Second, // wait time before ping if no activity + Timeout: 20 * time.Second, // ping timeout + MaxConnectionAge: 10 * time.Hour, + }), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 60 * time.Second, // min time a client should wait before sending a ping + PermitWithoutStream: false, + }), + grpc.MaxRecvMsgSize(Max_Message_Size), + grpc.MaxSendMsgSize(Max_Message_Size), + ) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.NewServer(options...) +} + +func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + // opts = append(opts, grpc.WithBlock()) + // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) + var options []grpc.DialOption + options = append(options, + // grpc.WithInsecure(), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(Max_Message_Size), + grpc.MaxCallRecvMsgSize(Max_Message_Size), + ), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 30 * time.Second, // client ping server if no activity for this long + Timeout: 20 * time.Second, + PermitWithoutStream: false, + })) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.DialContext(ctx, address, options...) +} + +func getOrCreateConnection(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + + grpcClientsLock.Lock() + defer grpcClientsLock.Unlock() + + existingConnection, found := grpcClients[address] + if found { + return existingConnection, nil + } + + grpcConnection, err := GrpcDial(context.Background(), address, opts...) 
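// A usage sketch of how the helpers in this file fit together: each
// With*Client wrapper below resolves the HTTP address to its gRPC address by
// adding 10000 to the port (ParseServerToGrpcAddress), then reuses the
// *grpc.ClientConn cached here, one connection per address for the process
// lifetime. Assuming a master reachable at localhost:9333 (so gRPC on
// localhost:19333), a caller would write:
//
//	err := WithMasterClient("localhost:9333", grpcDialOption,
//		func(client master_pb.SeaweedClient) error {
//			_, err := client.GetMasterConfiguration(context.Background(),
//				&master_pb.GetMasterConfigurationRequest{})
//			return err
//		})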
+ if err != nil { + return nil, fmt.Errorf("fail to dial %s: %v", address, err) + } + + grpcClients[address] = grpcConnection + + return grpcConnection, nil +} + +func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { + + grpcConnection, err := getOrCreateConnection(address, opts...) + if err != nil { + return fmt.Errorf("getOrCreateConnection %s: %v", address, err) + } + return fn(grpcConnection) +} + +func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { + return ParseServerAddress(server, 10000) +} + +func ParseServerAddress(server string, deltaPort int) (newServerAddress string, err error) { + + host, port, parseErr := hostAndPort(server) + if parseErr != nil { + return "", fmt.Errorf("server port parse error: %v", parseErr) + } + + newPort := int(port) + deltaPort + + return fmt.Sprintf("%s:%d", host, newPort), nil +} + +func hostAndPort(address string) (host string, port uint64, err error) { + colonIndex := strings.LastIndex(address, ":") + if colonIndex < 0 { + return "", 0, fmt.Errorf("server should have hostname:port format: %v", address) + } + port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("server port parse error: %v", err) + } + + return address[:colonIndex], port, err +} + +func ServerToGrpcAddress(server string) (serverGrpcAddress string) { + + host, port, parseErr := hostAndPort(server) + if parseErr != nil { + glog.Fatalf("server address %s parse error: %v", server, parseErr) + } + + grpcPort := int(port) + 10000 + + return fmt.Sprintf("%s:%d", host, grpcPort) +} + +func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) { + host, grpcPort, parseErr := hostAndPort(grpcAddress) + if parseErr != nil { + glog.Fatalf("server grpc address %s parse error: %v", grpcAddress, parseErr) + } + + port := int(grpcPort) - 10000 + + return fmt.Sprintf("%s:%d", host, port) +} + +func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { + + masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master) + if parseErr != nil { + return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) + } + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := master_pb.NewSeaweedClient(grpcConnection) + return fn(client) + }, masterGrpcAddress, grpcDialOption) + +} + +func WithBrokerGrpcClient(brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := messaging_pb.NewSeaweedMessagingClient(grpcConnection) + return fn(client) + }, brokerGrpcAddress, grpcDialOption) + +} + +func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr) + } + + return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn) + +} + +func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} diff 
--git a/weed/pb/iam.proto b/weed/pb/iam.proto new file mode 100644 index 000000000..558bd2b70 --- /dev/null +++ b/weed/pb/iam.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package iam_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "IamProto"; + +////////////////////////////////////////////////// + +service SeaweedIdentityAccessManagement { + +} + +////////////////////////////////////////////////// + +message S3ApiConfiguration { + repeated Identity identities = 1; +} + +message Identity { + string name = 1; + repeated Credential credentials = 2; + repeated string actions = 3; +} + +message Credential { + string access_key = 1; + string secret_key = 2; + // uint64 expiration = 3; + // bool is_disabled = 4; +} + +/* +message Policy { + repeated Statement statements = 1; +} + +message Statement { + repeated Action action = 1; + repeated Resource resource = 2; +} + +message Action { + string action = 1; +} +message Resource { + string bucket = 1; + // string path = 2; +} +*/
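// A hedged illustration of a populated S3ApiConfiguration in protobuf text
// format (identity name, keys, and actions are made-up example values):
//
// identities {
//   name: "example_user"
//   credentials {
//     access_key: "EXAMPLEACCESSKEY"
//     secret_key: "examplesecretkey"
//   }
//   actions: "Read"
//   actions: "Write"
// }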
\ No newline at end of file diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go new file mode 100644 index 000000000..7d0b6281b --- /dev/null +++ b/weed/pb/iam_pb/iam.pb.go @@ -0,0 +1,356 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 +// source: iam.proto + +package iam_pb + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type S3ApiConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"` +} + +func (x *S3ApiConfiguration) Reset() { + *x = S3ApiConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S3ApiConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3ApiConfiguration) ProtoMessage() {} + +func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3ApiConfiguration.ProtoReflect.Descriptor instead. +func (*S3ApiConfiguration) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{0} +} + +func (x *S3ApiConfiguration) GetIdentities() []*Identity { + if x != nil { + return x.Identities + } + return nil +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"` + Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. 
+func (*Identity) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{1} +} + +func (x *Identity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Identity) GetCredentials() []*Credential { + if x != nil { + return x.Credentials + } + return nil +} + +func (x *Identity) GetActions() []string { + if x != nil { + return x.Actions + } + return nil +} + +type Credential struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` +} + +func (x *Credential) Reset() { + *x = Credential{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credential) ProtoMessage() {} + +func (x *Credential) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credential.ProtoReflect.Descriptor instead. +func (*Credential) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{2} +} + +func (x *Credential) GetAccessKey() string { + if x != nil { + return x.AccessKey + } + return "" +} + +func (x *Credential) GetSecretKey() string { + if x != nil { + return x.SecretKey + } + return "" +} + +var File_iam_proto protoreflect.FileDescriptor + +var file_iam_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d, + 0x5f, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x08, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, + 0x63, 0x72, 
0x65, 0x74, 0x4b, 0x65, 0x79, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65, + 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65, + 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08, + 0x49, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, + 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, + 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_iam_proto_rawDescOnce sync.Once + file_iam_proto_rawDescData = file_iam_proto_rawDesc +) + +func file_iam_proto_rawDescGZIP() []byte { + file_iam_proto_rawDescOnce.Do(func() { + file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData) + }) + return file_iam_proto_rawDescData +} + +var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_iam_proto_goTypes = []interface{}{ + (*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration + (*Identity)(nil), // 1: iam_pb.Identity + (*Credential)(nil), // 2: iam_pb.Credential +} +var file_iam_proto_depIdxs = []int32{ + 1, // 0: iam_pb.S3ApiConfiguration.identities:type_name -> iam_pb.Identity + 2, // 1: iam_pb.Identity.credentials:type_name -> iam_pb.Credential + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_iam_proto_init() } +func file_iam_proto_init() { + if File_iam_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3ApiConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Credential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_iam_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_iam_proto_goTypes, + DependencyIndexes: file_iam_proto_depIdxs, + MessageInfos: file_iam_proto_msgTypes, + }.Build() + File_iam_proto = out.File + file_iam_proto_rawDesc = nil + file_iam_proto_goTypes = nil + file_iam_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion6 + +// SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SeaweedIdentityAccessManagementClient interface { +} + +type seaweedIdentityAccessManagementClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) SeaweedIdentityAccessManagementClient { + return &seaweedIdentityAccessManagementClient{cc} +} + +// SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service. +type SeaweedIdentityAccessManagementServer interface { +} + +// UnimplementedSeaweedIdentityAccessManagementServer can be embedded to have forward compatible implementations. +type UnimplementedSeaweedIdentityAccessManagementServer struct { +} + +func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) { + s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv) +} + +var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{ + ServiceName: "iam_pb.SeaweedIdentityAccessManagement", + HandlerType: (*SeaweedIdentityAccessManagementServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "iam.proto", +} diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 9b1e884c7..cdb49d1e3 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package master_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb"; + ////////////////////////////////////////////////// service Seaweed { @@ -23,8 +25,17 @@ service Seaweed { } rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) { } + rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) { + } rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { } + rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) { + } + rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) { + } + rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) { + } + } ////////////////////////////////////////////////// @@ -33,7 +44,6 @@ message Heartbeat { string ip = 1; uint32 port = 2; string public_url = 3; - uint32 max_volume_count = 4; uint64 max_file_key = 5; string data_center = 6; string rack = 7; @@ -51,6 +61,8 @@ message Heartbeat { repeated VolumeEcShardInformationMessage deleted_ec_shards = 18; bool has_no_ec_shards = 19; + map<string, uint32> max_volume_counts = 4; + } message HeartbeatResponse { @@ -76,6 +88,7 @@ message VolumeInformationMessage { int64 modified_at_second = 12; string remote_storage_name = 13; string remote_storage_key = 14; + string disk_type = 15; } message VolumeShortInformationMessage { @@ -84,12 +97,14 @@ message VolumeShortInformationMessage { uint32 replica_placement = 8; uint32 version = 9; uint32 ttl = 10; + string disk_type = 15; } message VolumeEcShardInformationMessage { uint32 id = 1; string collection = 2; uint32 ec_index_bits = 3; + string disk_type = 4; } message StorageBackend { @@ -112,6 +127,7 @@ message SuperBlockExtra { message KeepConnectedRequest { string name = 1; + uint32 grpc_port = 2; } message VolumeLocation { @@ -120,6 +136,7 @@ message VolumeLocation { repeated uint32 
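// Hedged reading of the Heartbeat change above: the scalar max_volume_count
// (field 4) is replaced by a map keyed by disk type, reusing field number 4,
// and disk_type strings are threaded through the Volume*InformationMessage
// types. An illustrative text-format payload (values made up; an empty key is
// assumed to denote the default disk tier):
//
// max_volume_counts { key: "" value: 7 }
// max_volume_counts { key: "ssd" value: 3 }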
new_vids = 3; repeated uint32 deleted_vids = 4; string leader = 5; // optional when leader is not itself + string data_center = 6; // optional when DataCenter is in use } message LookupVolumeRequest { @@ -150,6 +167,7 @@ message AssignRequest { string data_node = 7; uint32 memory_map_max_size_mb = 8; uint32 Writable_volume_count = 9; + string disk_type = 10; } message AssignResponse { string fid = 1; @@ -164,11 +182,9 @@ message StatisticsRequest { string replication = 1; string collection = 2; string ttl = 3; + string disk_type = 4; } message StatisticsResponse { - string replication = 1; - string collection = 2; - string ttl = 3; uint64 total_size = 4; uint64 used_size = 5; uint64 file_count = 6; @@ -177,11 +193,6 @@ message StatisticsResponse { // // collection related // - -message StorageType { - string replication = 1; - string ttl = 2; -} message Collection { string name = 1; } @@ -202,8 +213,8 @@ message CollectionDeleteResponse { // // volume related // -message DataNodeInfo { - string id = 1; +message DiskInfo { + string type = 1; uint64 volume_count = 2; uint64 max_volume_count = 3; uint64 free_volume_count = 4; @@ -212,32 +223,24 @@ message DataNodeInfo { repeated VolumeEcShardInformationMessage ec_shard_infos = 7; uint64 remote_volume_count = 8; } +message DataNodeInfo { + string id = 1; + map<string, DiskInfo> diskInfos = 2; +} message RackInfo { string id = 1; - uint64 volume_count = 2; - uint64 max_volume_count = 3; - uint64 free_volume_count = 4; - uint64 active_volume_count = 5; - repeated DataNodeInfo data_node_infos = 6; - uint64 remote_volume_count = 7; + repeated DataNodeInfo data_node_infos = 2; + map<string, DiskInfo> diskInfos = 3; } message DataCenterInfo { string id = 1; - uint64 volume_count = 2; - uint64 max_volume_count = 3; - uint64 free_volume_count = 4; - uint64 active_volume_count = 5; - repeated RackInfo rack_infos = 6; - uint64 remote_volume_count = 7; + repeated RackInfo rack_infos = 2; + map<string, DiskInfo> diskInfos = 3; } message TopologyInfo { string id = 1; - uint64 volume_count = 2; - uint64 max_volume_count = 3; - uint64 free_volume_count = 4; - uint64 active_volume_count = 5; - repeated DataCenterInfo data_center_infos = 6; - uint64 remote_volume_count = 7; + repeated DataCenterInfo data_center_infos = 2; + map<string, DiskInfo> diskInfos = 3; } message VolumeListRequest { } @@ -258,9 +261,44 @@ message LookupEcVolumeResponse { repeated EcShardIdLocation shard_id_locations = 2; } +message VacuumVolumeRequest { + float garbage_threshold = 1; +} +message VacuumVolumeResponse { +} + message GetMasterConfigurationRequest { } message GetMasterConfigurationResponse { string metrics_address = 1; uint32 metrics_interval_seconds = 2; + repeated StorageBackend storage_backends = 3; + string default_replication = 4; + string leader = 5; +} + +message ListMasterClientsRequest { + string client_type = 1; +} +message ListMasterClientsResponse { + repeated string grpc_addresses = 1; +} + +message LeaseAdminTokenRequest { + int64 previous_token = 1; + int64 previous_lock_time = 2; + string lock_name = 3; + string client_name = 4; +} +message LeaseAdminTokenResponse { + int64 token = 1; + int64 lock_ts_ns = 2; +} + +message ReleaseAdminTokenRequest { + int64 previous_token = 1; + int64 previous_lock_time = 2; + string lock_name = 3; +} +message ReleaseAdminTokenResponse { } diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index ea4362c92..29d8499f8 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ 
-1,1485 +1,4088 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 // source: master.proto -// DO NOT EDIT! - -/* -Package master_pb is a generated protocol buffer package. - -It is generated from these files: - master.proto - -It has these top-level messages: - Heartbeat - HeartbeatResponse - VolumeInformationMessage - VolumeShortInformationMessage - VolumeEcShardInformationMessage - StorageBackend - Empty - SuperBlockExtra - KeepConnectedRequest - VolumeLocation - LookupVolumeRequest - LookupVolumeResponse - Location - AssignRequest - AssignResponse - StatisticsRequest - StatisticsResponse - StorageType - Collection - CollectionListRequest - CollectionListResponse - CollectionDeleteRequest - CollectionDeleteResponse - DataNodeInfo - RackInfo - DataCenterInfo - TopologyInfo - VolumeListRequest - VolumeListResponse - LookupEcVolumeRequest - LookupEcVolumeResponse - GetMasterConfigurationRequest - GetMasterConfigurationResponse -*/ -package master_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package master_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type Heartbeat struct { - Ip string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"` - DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"` - AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` - Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey,proto3" json:"max_file_key,omitempty"` + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` + AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort,proto3" json:"admin_port,omitempty"` + Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes,proto3" json:"volumes,omitempty"` // delta volumes - NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes" json:"new_volumes,omitempty"` - DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes" json:"deleted_volumes,omitempty"` - HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes" json:"has_no_volumes,omitempty"` + NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes,proto3" json:"new_volumes,omitempty"` + DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes,proto3" json:"deleted_volumes,omitempty"` + HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes,proto3" json:"has_no_volumes,omitempty"` // erasure coding - EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards" json:"ec_shards,omitempty"` + EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards,proto3" json:"ec_shards,omitempty"` // delta erasure coding shards - NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards" json:"new_ec_shards,omitempty"` - DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards" json:"deleted_ec_shards,omitempty"` - HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards" json:"has_no_ec_shards,omitempty"` + NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards,proto3" json:"new_ec_shards,omitempty"` + 
DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards,proto3" json:"deleted_ec_shards,omitempty"` + HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards,proto3" json:"has_no_ec_shards,omitempty"` + MaxVolumeCounts map[string]uint32 `protobuf:"bytes,4,rep,name=max_volume_counts,json=maxVolumeCounts,proto3" json:"max_volume_counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (m *Heartbeat) Reset() { *m = Heartbeat{} } -func (m *Heartbeat) String() string { return proto.CompactTextString(m) } -func (*Heartbeat) ProtoMessage() {} -func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Heartbeat) GetIp() string { - if m != nil { - return m.Ip +func (x *Heartbeat) Reset() { + *x = Heartbeat{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *Heartbeat) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Heartbeat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Heartbeat) ProtoMessage() {} + +func (x *Heartbeat) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead. +func (*Heartbeat) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{0} } -func (m *Heartbeat) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *Heartbeat) GetIp() string { + if x != nil { + return x.Ip } return "" } -func (m *Heartbeat) GetMaxVolumeCount() uint32 { - if m != nil { - return m.MaxVolumeCount +func (x *Heartbeat) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } -func (m *Heartbeat) GetMaxFileKey() uint64 { - if m != nil { - return m.MaxFileKey +func (x *Heartbeat) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +func (x *Heartbeat) GetMaxFileKey() uint64 { + if x != nil { + return x.MaxFileKey } return 0 } -func (m *Heartbeat) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *Heartbeat) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } -func (m *Heartbeat) GetRack() string { - if m != nil { - return m.Rack +func (x *Heartbeat) GetRack() string { + if x != nil { + return x.Rack } return "" } -func (m *Heartbeat) GetAdminPort() uint32 { - if m != nil { - return m.AdminPort +func (x *Heartbeat) GetAdminPort() uint32 { + if x != nil { + return x.AdminPort } return 0 } -func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage { - if m != nil { - return m.Volumes +func (x *Heartbeat) GetVolumes() []*VolumeInformationMessage { + if x != nil { + return x.Volumes } return nil } -func (m *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage { - if m != nil { - return m.NewVolumes +func (x *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage { + if x != nil { + return x.NewVolumes } return nil } -func (m *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage { - if m != nil { - return m.DeletedVolumes +func (x *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage { + if x != nil { + return 
x.DeletedVolumes } return nil } -func (m *Heartbeat) GetHasNoVolumes() bool { - if m != nil { - return m.HasNoVolumes +func (x *Heartbeat) GetHasNoVolumes() bool { + if x != nil { + return x.HasNoVolumes } return false } -func (m *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage { - if m != nil { - return m.EcShards +func (x *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.EcShards } return nil } -func (m *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage { - if m != nil { - return m.NewEcShards +func (x *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.NewEcShards } return nil } -func (m *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage { - if m != nil { - return m.DeletedEcShards +func (x *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.DeletedEcShards } return nil } -func (m *Heartbeat) GetHasNoEcShards() bool { - if m != nil { - return m.HasNoEcShards +func (x *Heartbeat) GetHasNoEcShards() bool { + if x != nil { + return x.HasNoEcShards } return false } +func (x *Heartbeat) GetMaxVolumeCounts() map[string]uint32 { + if x != nil { + return x.MaxVolumeCounts + } + return nil +} + type HeartbeatResponse struct { - VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"` - Leader string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` - MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` - StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends" json:"storage_backends,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` } -func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } -func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) } -func (*HeartbeatResponse) ProtoMessage() {} -func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *HeartbeatResponse) Reset() { + *x = HeartbeatResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 { - if m != nil { - return m.VolumeSizeLimit +func (x *HeartbeatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatResponse) ProtoMessage() {} + +func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := 
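// Sketch of consuming the new per-disk-type accessor on a received heartbeat
// (assuming `hb` is a decoded *Heartbeat; the empty disk type is assumed to
// denote the default tier):
//
//	for diskType, maxCount := range hb.GetMaxVolumeCounts() {
//		fmt.Printf("disk type %q: up to %d volumes\n", diskType, maxCount)
//	}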
&file_master_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead. +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{1} +} + +func (x *HeartbeatResponse) GetVolumeSizeLimit() uint64 { + if x != nil { + return x.VolumeSizeLimit } return 0 } -func (m *HeartbeatResponse) GetLeader() string { - if m != nil { - return m.Leader +func (x *HeartbeatResponse) GetLeader() string { + if x != nil { + return x.Leader } return "" } -func (m *HeartbeatResponse) GetMetricsAddress() string { - if m != nil { - return m.MetricsAddress +func (x *HeartbeatResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress } return "" } -func (m *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { - if m != nil { - return m.MetricsIntervalSeconds +func (x *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { + if x != nil { + return x.MetricsIntervalSeconds } return 0 } -func (m *HeartbeatResponse) GetStorageBackends() []*StorageBackend { - if m != nil { - return m.StorageBackends +func (x *HeartbeatResponse) GetStorageBackends() []*StorageBackend { + if x != nil { + return x.StorageBackends } return nil } type VolumeInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` - DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"` - DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` - CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` - ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"` - RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName" json:"remote_storage_name,omitempty"` - RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey" json:"remote_storage_key,omitempty"` -} - -func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } -func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeInformationMessage) ProtoMessage() {} -func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *VolumeInformationMessage) GetId() uint32 { - if m != nil { - return m.Id + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Size 
uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"` + DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"` + RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"` + RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeInformationMessage) Reset() { + *x = VolumeInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeInformationMessage) ProtoMessage() {} + +func (x *VolumeInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeInformationMessage.ProtoReflect.Descriptor instead. 
+func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{2} +} + +func (x *VolumeInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeInformationMessage) GetSize() uint64 { - if m != nil { - return m.Size +func (x *VolumeInformationMessage) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *VolumeInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeInformationMessage) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *VolumeInformationMessage) GetFileCount() uint64 { + if x != nil { + return x.FileCount } return 0 } -func (m *VolumeInformationMessage) GetDeleteCount() uint64 { - if m != nil { - return m.DeleteCount +func (x *VolumeInformationMessage) GetDeleteCount() uint64 { + if x != nil { + return x.DeleteCount } return 0 } -func (m *VolumeInformationMessage) GetDeletedByteCount() uint64 { - if m != nil { - return m.DeletedByteCount +func (x *VolumeInformationMessage) GetDeletedByteCount() uint64 { + if x != nil { + return x.DeletedByteCount } return 0 } -func (m *VolumeInformationMessage) GetReadOnly() bool { - if m != nil { - return m.ReadOnly +func (x *VolumeInformationMessage) GetReadOnly() bool { + if x != nil { + return x.ReadOnly } return false } -func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 { - if m != nil { - return m.ReplicaPlacement +func (x *VolumeInformationMessage) GetReplicaPlacement() uint32 { + if x != nil { + return x.ReplicaPlacement } return 0 } -func (m *VolumeInformationMessage) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeInformationMessage) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -func (m *VolumeInformationMessage) GetTtl() uint32 { - if m != nil { - return m.Ttl +func (x *VolumeInformationMessage) GetTtl() uint32 { + if x != nil { + return x.Ttl } return 0 } -func (m *VolumeInformationMessage) GetCompactRevision() uint32 { - if m != nil { - return m.CompactRevision +func (x *VolumeInformationMessage) GetCompactRevision() uint32 { + if x != nil { + return x.CompactRevision } return 0 } -func (m *VolumeInformationMessage) GetModifiedAtSecond() int64 { - if m != nil { - return m.ModifiedAtSecond +func (x *VolumeInformationMessage) GetModifiedAtSecond() int64 { + if x != nil { + return x.ModifiedAtSecond } return 0 } -func (m *VolumeInformationMessage) GetRemoteStorageName() string { - if m != nil { - return m.RemoteStorageName +func (x *VolumeInformationMessage) GetRemoteStorageName() string { + if x != nil { + return x.RemoteStorageName + } + return "" +} + +func (x *VolumeInformationMessage) GetRemoteStorageKey() string { + if x != nil { + return x.RemoteStorageKey } return "" } -func (m *VolumeInformationMessage) GetRemoteStorageKey() string { - if m != nil { - return m.RemoteStorageKey +func (x *VolumeInformationMessage) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } type VolumeShortInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version" 
json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeShortInformationMessage) Reset() { + *x = VolumeShortInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeShortInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeShortInformationMessage) ProtoMessage() {} + +func (x *VolumeShortInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeShortInformationMessage) Reset() { *m = VolumeShortInformationMessage{} } -func (m *VolumeShortInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeShortInformationMessage) ProtoMessage() {} -func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +// Deprecated: Use VolumeShortInformationMessage.ProtoReflect.Descriptor instead. 
+func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{3} +} -func (m *VolumeShortInformationMessage) GetId() uint32 { - if m != nil { - return m.Id +func (x *VolumeShortInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeShortInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeShortInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeShortInformationMessage) GetReplicaPlacement() uint32 { - if m != nil { - return m.ReplicaPlacement +func (x *VolumeShortInformationMessage) GetReplicaPlacement() uint32 { + if x != nil { + return x.ReplicaPlacement } return 0 } -func (m *VolumeShortInformationMessage) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeShortInformationMessage) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -func (m *VolumeShortInformationMessage) GetTtl() uint32 { - if m != nil { - return m.Ttl +func (x *VolumeShortInformationMessage) GetTtl() uint32 { + if x != nil { + return x.Ttl } return 0 } +func (x *VolumeShortInformationMessage) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + type VolumeEcShardInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeEcShardInformationMessage) Reset() { + *x = VolumeEcShardInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (m *VolumeEcShardInformationMessage) GetId() uint32 { - if m != nil { - return m.Id +func (x *VolumeEcShardInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardInformationMessage.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{4} +} + +func (x *VolumeEcShardInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeEcShardInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { - if m != nil { - return m.EcIndexBits +func (x *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { + if x != nil { + return x.EcIndexBits } return 0 } +func (x *VolumeEcShardInformationMessage) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + type StorageBackend struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - Properties map[string]string `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *StorageBackend) Reset() { *m = StorageBackend{} } -func (m *StorageBackend) String() string { return proto.CompactTextString(m) } -func (*StorageBackend) ProtoMessage() {} -func (*StorageBackend) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *StorageBackend) Reset() { + *x = StorageBackend{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *StorageBackend) GetType() string { - if m != nil { - return m.Type +func (x *StorageBackend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageBackend) ProtoMessage() {} + +func (x *StorageBackend) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageBackend.ProtoReflect.Descriptor instead. 
+func (*StorageBackend) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{5} +} + +func (x *StorageBackend) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *StorageBackend) GetId() string { - if m != nil { - return m.Id +func (x *StorageBackend) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *StorageBackend) GetProperties() map[string]string { - if m != nil { - return m.Properties +func (x *StorageBackend) GetProperties() map[string]string { + if x != nil { + return x.Properties } return nil } type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type SuperBlockExtra struct { - ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"` +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SuperBlockExtra) Reset() { *m = SuperBlockExtra{} } -func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) } -func (*SuperBlockExtra) ProtoMessage() {} -func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*Empty) ProtoMessage() {} -func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { - if m != nil { - return m.ErasureCoding +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type SuperBlockExtra_ErasureCoding struct { - Data uint32 `protobuf:"varint,1,opt,name=data" json:"data,omitempty"` - Parity uint32 `protobuf:"varint,2,opt,name=parity" json:"parity,omitempty"` - VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
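StorageBackend keeps its string-keyed Properties map, and every regenerated type now exposes ProtoReflect, the handle the google.golang.org/protobuf runtime uses for marshaling in place of the removed proto.CompactTextString path. A sketch under the same import assumption; the "s3.region" key is illustrative, not taken from this diff:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	b := &master_pb.StorageBackend{
		Type:       "s3",
		Id:         "backend1",
		Properties: map[string]string{"s3.region": "us-east-1"},
	}
	// GetProperties is nil-safe and may return a nil map.
	fmt.Println(b.GetProperties()["s3.region"])

	// ProtoReflect hands the message to the new runtime, which the text
	// and binary marshalers use internally.
	fmt.Println(b.ProtoReflect().Descriptor().FullName())
	fmt.Println(prototext.Format(b))
}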
+func (*Empty) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{6} } -func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_ErasureCoding{} } -func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) } -func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} -func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{7, 0} +type SuperBlockExtra struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"` } -func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 { - if m != nil { - return m.Data +func (x *SuperBlockExtra) Reset() { + *x = SuperBlockExtra{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *SuperBlockExtra_ErasureCoding) GetParity() uint32 { - if m != nil { - return m.Parity +func (x *SuperBlockExtra) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SuperBlockExtra) ProtoMessage() {} + +func (x *SuperBlockExtra) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use SuperBlockExtra.ProtoReflect.Descriptor instead. +func (*SuperBlockExtra) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{7} } -func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { - if m != nil { - return m.VolumeIds +func (x *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { + if x != nil { + return x.ErasureCoding } return nil } type KeepConnectedRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` +} + +func (x *KeepConnectedRequest) Reset() { + *x = KeepConnectedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} } -func (m *KeepConnectedRequest) String() string { return proto.CompactTextString(m) } -func (*KeepConnectedRequest) ProtoMessage() {} -func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*KeepConnectedRequest) ProtoMessage() {} -func (m *KeepConnectedRequest) GetName() string { - if m != nil { - return m.Name +func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
KeepConnectedRequest.ProtoReflect.Descriptor instead. +func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{8} +} + +func (x *KeepConnectedRequest) GetName() string { + if x != nil { + return x.Name } return "" } +func (x *KeepConnectedRequest) GetGrpcPort() uint32 { + if x != nil { + return x.GrpcPort + } + return 0 +} + type VolumeLocation struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"` - DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"` - Leader string `protobuf:"bytes,5,opt,name=leader" json:"leader,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"` + DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional when leader is not itself + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // optional when DataCenter is in use +} + +func (x *VolumeLocation) Reset() { + *x = VolumeLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeLocation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeLocation) Reset() { *m = VolumeLocation{} } -func (m *VolumeLocation) String() string { return proto.CompactTextString(m) } -func (*VolumeLocation) ProtoMessage() {} -func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*VolumeLocation) ProtoMessage() {} -func (m *VolumeLocation) GetUrl() string { - if m != nil { - return m.Url +func (x *VolumeLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeLocation.ProtoReflect.Descriptor instead. 
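KeepConnectedRequest gains a grpc_port field next to name. A sketch of populating it on the client side (same import assumption as the earlier sketches; the client name is hypothetical):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	req := &master_pb.KeepConnectedRequest{
		Name:     "filer-1", // hypothetical client name
		GrpcPort: 18888,     // new field: the caller's own gRPC port
	}
	fmt.Println(req.GetName(), req.GetGrpcPort())
}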
+func (*VolumeLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{9} +} + +func (x *VolumeLocation) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *VolumeLocation) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *VolumeLocation) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *VolumeLocation) GetNewVids() []uint32 { - if m != nil { - return m.NewVids +func (x *VolumeLocation) GetNewVids() []uint32 { + if x != nil { + return x.NewVids } return nil } -func (m *VolumeLocation) GetDeletedVids() []uint32 { - if m != nil { - return m.DeletedVids +func (x *VolumeLocation) GetDeletedVids() []uint32 { + if x != nil { + return x.DeletedVids } return nil } -func (m *VolumeLocation) GetLeader() string { - if m != nil { - return m.Leader +func (x *VolumeLocation) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +func (x *VolumeLocation) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } type LookupVolumeRequest struct { - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, a bit faster if provided. } -func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } -func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *LookupVolumeRequest) Reset() { + *x = LookupVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *LookupVolumeRequest) GetVolumeIds() []string { - if m != nil { - return m.VolumeIds +func (*LookupVolumeRequest) ProtoMessage() {} + +func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead. 
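VolumeLocation now also reports an optional data_center alongside the incremental new_vids/deleted_vids lists. A sketch of folding one such update into a local volume-id-to-URL map (same import assumption; the addresses are illustrative):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// applyUpdate folds one VolumeLocation delta into a vid -> url map.
func applyUpdate(vidToURL map[uint32]string, loc *master_pb.VolumeLocation) {
	for _, vid := range loc.GetNewVids() {
		vidToURL[vid] = loc.GetUrl()
	}
	for _, vid := range loc.GetDeletedVids() {
		delete(vidToURL, vid)
	}
}

func main() {
	m := map[uint32]string{}
	applyUpdate(m, &master_pb.VolumeLocation{
		Url:        "127.0.0.1:8080",
		DataCenter: "dc1", // new field, set when data centers are configured
		NewVids:    []uint32{1, 2},
	})
	fmt.Println(m)
}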
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{10} +} + +func (x *LookupVolumeRequest) GetVolumeIds() []string { + if x != nil { + return x.VolumeIds } return nil } -func (m *LookupVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *LookupVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type LookupVolumeResponse struct { - VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations" json:"volume_id_locations,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } -func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations,proto3" json:"volume_id_locations,omitempty"` +} -func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { - if m != nil { - return m.VolumeIdLocations +func (x *LookupVolumeResponse) Reset() { + *x = LookupVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type LookupVolumeResponse_VolumeIdLocation struct { - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` +func (x *LookupVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVolumeResponse_VolumeIdLocation{} } -func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} -func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{11, 0} -} +func (*LookupVolumeResponse) ProtoMessage() {} -func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{11} } -func (m *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { + if x != nil { + return x.VolumeIdLocations } return nil } -func (m *LookupVolumeResponse_VolumeIdLocation) GetError() string { - if m != nil { - return m.Error +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type Location struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +// Deprecated: Use Location.ProtoReflect.Descriptor instead. 
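The lookup pair keeps its shape: LookupVolumeRequest carries volume id strings plus an optional collection, and each VolumeIdLocation in the response holds either Locations or an Error. A consumption sketch using the generated nil-safe getters (same import assumption; resp would normally come from the master's LookupVolume RPC):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	resp := &master_pb.LookupVolumeResponse{} // normally returned by the LookupVolume RPC
	for _, vl := range resp.GetVolumeIdLocations() {
		if vl.GetError() != "" {
			fmt.Println("lookup failed for", vl.GetVolumeId()+":", vl.GetError())
			continue
		}
		for _, loc := range vl.GetLocations() {
			fmt.Println(vl.GetVolumeId(), "->", loc.GetUrl(), loc.GetPublicUrl())
		}
	}
}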
+func (*Location) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{12} +} -func (m *Location) GetUrl() string { - if m != nil { - return m.Url +func (x *Location) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *Location) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *Location) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } type AssignRequest struct { - Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"` - WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount" json:"Writable_volume_count,omitempty"` -} - -func (m *AssignRequest) Reset() { *m = AssignRequest{} } -func (m *AssignRequest) String() string { return proto.CompactTextString(m) } -func (*AssignRequest) ProtoMessage() {} -func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *AssignRequest) GetCount() uint64 { - if m != nil { - return m.Count + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` + DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` + WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount,proto3" json:"Writable_volume_count,omitempty"` + DiskType string `protobuf:"bytes,10,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *AssignRequest) Reset() { + *x = AssignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignRequest) ProtoMessage() {} + +func (x *AssignRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use AssignRequest.ProtoReflect.Descriptor instead. +func (*AssignRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{13} +} + +func (x *AssignRequest) GetCount() uint64 { + if x != nil { + return x.Count } return 0 } -func (m *AssignRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *AssignRequest) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *AssignRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *AssignRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *AssignRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *AssignRequest) GetTtl() string { + if x != nil { + return x.Ttl } return "" } -func (m *AssignRequest) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *AssignRequest) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } -func (m *AssignRequest) GetRack() string { - if m != nil { - return m.Rack +func (x *AssignRequest) GetRack() string { + if x != nil { + return x.Rack } return "" } -func (m *AssignRequest) GetDataNode() string { - if m != nil { - return m.DataNode +func (x *AssignRequest) GetDataNode() string { + if x != nil { + return x.DataNode } return "" } -func (m *AssignRequest) GetMemoryMapMaxSizeMb() uint32 { - if m != nil { - return m.MemoryMapMaxSizeMb +func (x *AssignRequest) GetMemoryMapMaxSizeMb() uint32 { + if x != nil { + return x.MemoryMapMaxSizeMb } return 0 } -func (m *AssignRequest) GetWritableVolumeCount() uint32 { - if m != nil { - return m.WritableVolumeCount +func (x *AssignRequest) GetWritableVolumeCount() uint32 { + if x != nil { + return x.WritableVolumeCount } return 0 } +func (x *AssignRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + type AssignResponse struct { - Fid string `protobuf:"bytes,1,opt,name=fid" json:"fid,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"` - Auth string `protobuf:"bytes,6,opt,name=auth" json:"auth,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"` } -func (m *AssignResponse) Reset() { *m = AssignResponse{} } -func (m *AssignResponse) String() string { return proto.CompactTextString(m) } -func (*AssignResponse) ProtoMessage() {} -func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *AssignResponse) Reset() { + *x = AssignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignResponse) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*AssignResponse) ProtoMessage() {} + +func (x *AssignResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignResponse.ProtoReflect.Descriptor instead. +func (*AssignResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{14} +} -func (m *AssignResponse) GetFid() string { - if m != nil { - return m.Fid +func (x *AssignResponse) GetFid() string { + if x != nil { + return x.Fid } return "" } -func (m *AssignResponse) GetUrl() string { - if m != nil { - return m.Url +func (x *AssignResponse) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *AssignResponse) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *AssignResponse) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *AssignResponse) GetCount() uint64 { - if m != nil { - return m.Count +func (x *AssignResponse) GetCount() uint64 { + if x != nil { + return x.Count } return 0 } -func (m *AssignResponse) GetError() string { - if m != nil { - return m.Error +func (x *AssignResponse) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *AssignResponse) GetAuth() string { - if m != nil { - return m.Auth +func (x *AssignResponse) GetAuth() string { + if x != nil { + return x.Auth } return "" } type StatisticsRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *StatisticsRequest) Reset() { + *x = StatisticsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } -func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } -func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*StatisticsRequest) ProtoMessage() {} -func (m *StatisticsRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. 
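AssignRequest also gains disk_type (field 10), letting a caller pin an assignment to a disk tier; AssignResponse is unchanged apart from the regeneration. A sketch of the two ends of an assignment (same import assumption; the fid and url values are illustrative and the RPC call itself is omitted):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	req := &master_pb.AssignRequest{
		Count:      1,
		Collection: "pictures",
		Ttl:        "1d",
		DiskType:   "ssd", // new field; empty keeps the default tier
	}
	_ = req // would be sent over the master's Assign RPC

	resp := &master_pb.AssignResponse{Fid: "3,01637037d6", Url: "127.0.0.1:8080"}
	if resp.GetError() != "" {
		fmt.Println("assign failed:", resp.GetError())
		return
	}
	fmt.Printf("upload to http://%s/%s\n", resp.GetUrl(), resp.GetFid())
}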
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{15} } -func (m *StatisticsRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *StatisticsRequest) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *StatisticsRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *StatisticsRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -type StatisticsResponse struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` +func (x *StatisticsRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" } -func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } -func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } -func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *StatisticsResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *StatisticsRequest) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } -func (m *StatisticsResponse) GetCollection() string { - if m != nil { - return m.Collection +type StatisticsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` +} + +func (x *StatisticsResponse) Reset() { + *x = StatisticsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *StatisticsResponse) GetTtl() string { - if m != nil { - return m.Ttl +func (x *StatisticsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsResponse) ProtoMessage() {} + +func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. 
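StatisticsRequest likewise gains disk_type, while StatisticsResponse is trimmed to the three counters: the echoed replication, collection and ttl fields are dropped, and the surviving fields keep their tag numbers 4 through 6. A sketch computing utilization from a response (same import assumption; the sizes are made up):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	req := &master_pb.StatisticsRequest{
		Collection: "pictures",
		DiskType:   "ssd", // new field
	}
	_ = req // would be sent over the master's Statistics RPC

	resp := &master_pb.StatisticsResponse{
		TotalSize: 100 << 30, // 100 GiB, made-up numbers
		UsedSize:  25 << 30,
		FileCount: 12345,
	}
	fmt.Printf("%.1f%% used across %d files\n",
		100*float64(resp.GetUsedSize())/float64(resp.GetTotalSize()),
		resp.GetFileCount())
}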
+func (*StatisticsResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{16} } -func (m *StatisticsResponse) GetTotalSize() uint64 { - if m != nil { - return m.TotalSize +func (x *StatisticsResponse) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize } return 0 } -func (m *StatisticsResponse) GetUsedSize() uint64 { - if m != nil { - return m.UsedSize +func (x *StatisticsResponse) GetUsedSize() uint64 { + if x != nil { + return x.UsedSize } return 0 } -func (m *StatisticsResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *StatisticsResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount } return 0 } -type StorageType struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"` -} +// +// collection related +// +type Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *StorageType) Reset() { *m = StorageType{} } -func (m *StorageType) String() string { return proto.CompactTextString(m) } -func (*StorageType) ProtoMessage() {} -func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} -func (m *StorageType) GetReplication() string { - if m != nil { - return m.Replication +func (x *Collection) Reset() { + *x = Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *StorageType) GetTtl() string { - if m != nil { - return m.Ttl - } - return "" +func (x *Collection) String() string { + return protoimpl.X.MessageStringOf(x) } -type Collection struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +func (*Collection) ProtoMessage() {} + +func (x *Collection) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Collection) Reset() { *m = Collection{} } -func (m *Collection) String() string { return proto.CompactTextString(m) } -func (*Collection) ProtoMessage() {} -func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +// Deprecated: Use Collection.ProtoReflect.Descriptor instead. 
+func (*Collection) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{17} +} -func (m *Collection) GetName() string { - if m != nil { - return m.Name +func (x *Collection) GetName() string { + if x != nil { + return x.Name } return "" } type CollectionListRequest struct { - IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes" json:"include_normal_volumes,omitempty"` - IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes" json:"include_ec_volumes,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` } -func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} } -func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) } -func (*CollectionListRequest) ProtoMessage() {} -func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (x *CollectionListRequest) Reset() { + *x = CollectionListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListRequest) ProtoMessage() {} + +func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead. 
+func (*CollectionListRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{18} +} -func (m *CollectionListRequest) GetIncludeNormalVolumes() bool { - if m != nil { - return m.IncludeNormalVolumes +func (x *CollectionListRequest) GetIncludeNormalVolumes() bool { + if x != nil { + return x.IncludeNormalVolumes } return false } -func (m *CollectionListRequest) GetIncludeEcVolumes() bool { - if m != nil { - return m.IncludeEcVolumes +func (x *CollectionListRequest) GetIncludeEcVolumes() bool { + if x != nil { + return x.IncludeEcVolumes } return false } type CollectionListResponse struct { - Collections []*Collection `protobuf:"bytes,1,rep,name=collections" json:"collections,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *CollectionListResponse) Reset() { *m = CollectionListResponse{} } -func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) } -func (*CollectionListResponse) ProtoMessage() {} -func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (x *CollectionListResponse) Reset() { + *x = CollectionListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CollectionListResponse) GetCollections() []*Collection { - if m != nil { - return m.Collections +func (x *CollectionListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListResponse) ProtoMessage() {} + +func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionListResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{19} +} + +func (x *CollectionListResponse) GetCollections() []*Collection { + if x != nil { + return x.Collections } return nil } type CollectionDeleteRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *CollectionDeleteRequest) Reset() { + *x = CollectionDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionDeleteRequest) ProtoMessage() {} + +func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} } -func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*CollectionDeleteRequest) ProtoMessage() {} -func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +// Deprecated: Use CollectionDeleteRequest.ProtoReflect.Descriptor instead. +func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{20} +} -func (m *CollectionDeleteRequest) GetName() string { - if m != nil { - return m.Name +func (x *CollectionDeleteRequest) GetName() string { + if x != nil { + return x.Name } return "" } type CollectionDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CollectionDeleteResponse) Reset() { + *x = CollectionDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} } -func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*CollectionDeleteResponse) ProtoMessage() {} -func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (x *CollectionDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionDeleteResponse) ProtoMessage() {} + +func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{21} +} // // volume related // -type DataNodeInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"` - EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos" json:"ec_shard_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +type DiskInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"` + VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"` + EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` +} + +func (x *DiskInfo) Reset() { + *x = DiskInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DataNodeInfo) Reset() { *m = DataNodeInfo{} } -func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) } -func (*DataNodeInfo) ProtoMessage() {} -func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (x *DiskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiskInfo) ProtoMessage() {} + +func (x *DiskInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiskInfo.ProtoReflect.Descriptor instead. 
+func (*DiskInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{22} +} -func (m *DataNodeInfo) GetId() string { - if m != nil { - return m.Id +func (x *DiskInfo) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *DataNodeInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount +func (x *DiskInfo) GetVolumeCount() uint64 { + if x != nil { + return x.VolumeCount } return 0 } -func (m *DataNodeInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +func (x *DiskInfo) GetMaxVolumeCount() uint64 { + if x != nil { + return x.MaxVolumeCount } return 0 } -func (m *DataNodeInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *DiskInfo) GetFreeVolumeCount() uint64 { + if x != nil { + return x.FreeVolumeCount } return 0 } -func (m *DataNodeInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount +func (x *DiskInfo) GetActiveVolumeCount() uint64 { + if x != nil { + return x.ActiveVolumeCount } return 0 } -func (m *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage { - if m != nil { - return m.VolumeInfos +func (x *DiskInfo) GetVolumeInfos() []*VolumeInformationMessage { + if x != nil { + return x.VolumeInfos } return nil } -func (m *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { - if m != nil { - return m.EcShardInfos +func (x *DiskInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { + if x != nil { + return x.EcShardInfos } return nil } -func (m *DataNodeInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *DiskInfo) GetRemoteVolumeCount() uint64 { + if x != nil { + return x.RemoteVolumeCount } return 0 } -type RackInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +type DataNodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,2,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DataNodeInfo) Reset() { + *x = DataNodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataNodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataNodeInfo) ProtoMessage() {} + +func (x *DataNodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *RackInfo) Reset() { *m = RackInfo{} } -func (m *RackInfo) String() string { return proto.CompactTextString(m) } -func (*RackInfo) ProtoMessage() {} -func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +// Deprecated: Use DataNodeInfo.ProtoReflect.Descriptor instead. +func (*DataNodeInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{23} +} -func (m *RackInfo) GetId() string { - if m != nil { - return m.Id +func (x *DataNodeInfo) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *RackInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount +func (x *DataNodeInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos } - return 0 + return nil } -func (m *RackInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +type RackInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DataNodeInfos []*DataNodeInfo `protobuf:"bytes,2,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RackInfo) Reset() { + *x = RackInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *RackInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *RackInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RackInfo) ProtoMessage() {} + +func (x *RackInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use RackInfo.ProtoReflect.Descriptor instead. 
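The flat per-node counters move off DataNodeInfo into DiskInfo, keyed by disk type, and the same diskInfos map recurs at the rack, data center and topology levels. A sketch reading one node's disks (same import assumption; treating the empty key as the default disk tier is an assumption):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	dn := &master_pb.DataNodeInfo{
		Id: "127.0.0.1:8080",
		DiskInfos: map[string]*master_pb.DiskInfo{
			"":    {VolumeCount: 5, MaxVolumeCount: 8, FreeVolumeCount: 3}, // "" assumed to be the default tier
			"ssd": {Type: "ssd", VolumeCount: 2, MaxVolumeCount: 4, FreeVolumeCount: 2},
		},
	}
	var free uint64
	for diskType, d := range dn.GetDiskInfos() {
		free += d.GetFreeVolumeCount()
		fmt.Printf("disk %q: %d/%d volumes\n", diskType, d.GetVolumeCount(), d.GetMaxVolumeCount())
	}
	fmt.Println("total free volume slots:", free)
}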
+func (*RackInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{24} } -func (m *RackInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount +func (x *RackInfo) GetId() string { + if x != nil { + return x.Id } - return 0 + return "" } -func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo { - if m != nil { - return m.DataNodeInfos +func (x *RackInfo) GetDataNodeInfos() []*DataNodeInfo { + if x != nil { + return x.DataNodeInfos } return nil } -func (m *RackInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *RackInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos } - return 0 + return nil } type DataCenterInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RackInfos []*RackInfo `protobuf:"bytes,2,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DataCenterInfo) Reset() { + *x = DataCenterInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DataCenterInfo) Reset() { *m = DataCenterInfo{} } -func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) } -func (*DataCenterInfo) ProtoMessage() {} -func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (x *DataCenterInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *DataCenterInfo) GetId() string { - if m != nil { - return m.Id +func (*DataCenterInfo) ProtoMessage() {} + +func (x *DataCenterInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataCenterInfo.ProtoReflect.Descriptor instead. 
+func (*DataCenterInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{25} +} + +func (x *DataCenterInfo) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *DataCenterInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount +func (x *DataCenterInfo) GetRackInfos() []*RackInfo { + if x != nil { + return x.RackInfos } - return 0 + return nil } -func (m *DataCenterInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +func (x *DataCenterInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos } - return 0 + return nil +} + +type TopologyInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DataCenterInfos []*DataCenterInfo `protobuf:"bytes,2,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *DataCenterInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *TopologyInfo) Reset() { + *x = TopologyInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *DataCenterInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount +func (x *TopologyInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopologyInfo) ProtoMessage() {} + +func (x *TopologyInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use TopologyInfo.ProtoReflect.Descriptor instead. 
+func (*TopologyInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{26} +} + +func (x *TopologyInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { + if x != nil { + return x.DataCenterInfos + } + return nil } -func (m *DataCenterInfo) GetRackInfos() []*RackInfo { - if m != nil { - return m.RackInfos +func (x *TopologyInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos } return nil } -func (m *DataCenterInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +type VolumeListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeListRequest) Reset() { + *x = VolumeListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeListRequest) ProtoMessage() {} + +func (x *VolumeListRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeListRequest.ProtoReflect.Descriptor instead. +func (*VolumeListRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{27} +} + +type VolumeListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"` + VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"` +} + +func (x *VolumeListResponse) Reset() { + *x = VolumeListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeListResponse) ProtoMessage() {} + +func (x *VolumeListResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeListResponse.ProtoReflect.Descriptor instead. 
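VolumeListResponse still wraps the whole tree, so the topology walk is unchanged: data centers, then racks, then data nodes; only the counters now hang off the leaf diskInfos maps. A traversal sketch (same import assumption; an empty response is used only to show that the getter chain is nil-safe):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// countVolumes sums volume counts per disk type across the topology,
// reading only the leaf (data node) maps to avoid double counting.
func countVolumes(resp *master_pb.VolumeListResponse) map[string]uint64 {
	totals := map[string]uint64{}
	for _, dc := range resp.GetTopologyInfo().GetDataCenterInfos() {
		for _, rack := range dc.GetRackInfos() {
			for _, dn := range rack.GetDataNodeInfos() {
				for diskType, d := range dn.GetDiskInfos() {
					totals[diskType] += d.GetVolumeCount()
				}
			}
		}
	}
	return totals
}

func main() {
	// Every getter is nil-safe, so even an empty response walks cleanly.
	fmt.Println(countVolumes(&master_pb.VolumeListResponse{}))
}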
+func (*VolumeListResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{28} +} + +func (x *VolumeListResponse) GetTopologyInfo() *TopologyInfo { + if x != nil { + return x.TopologyInfo + } + return nil +} + +func (x *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { + if x != nil { + return x.VolumeSizeLimitMb } return 0 } -type TopologyInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +type LookupEcVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *LookupEcVolumeRequest) Reset() { + *x = LookupEcVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TopologyInfo) Reset() { *m = TopologyInfo{} } -func (m *TopologyInfo) String() string { return proto.CompactTextString(m) } -func (*TopologyInfo) ProtoMessage() {} -func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*LookupEcVolumeRequest) ProtoMessage() {} -func (m *TopologyInfo) GetId() string { - if m != nil { - return m.Id +func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeRequest.ProtoReflect.Descriptor instead. 
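A practical consequence of the regenerated getters above (each checks `x != nil` before dereferencing): callers can chain them across the topology tree without intermediate nil checks. A minimal sketch, not part of this commit, assuming the in-tree import path `github.com/chrislusf/seaweedfs/weed/pb/master_pb`:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// countVolumes tallies volume counts per disk type across the topology
// carried by a VolumeListResponse, relying on nil-safe generated getters.
func countVolumes(resp *master_pb.VolumeListResponse) map[string]uint64 {
	counts := make(map[string]uint64)
	// GetTopologyInfo returns nil on a nil response, and ranging over a
	// nil slice or map is a no-op, so no nil checks are needed here.
	for _, dc := range resp.GetTopologyInfo().GetDataCenterInfos() {
		for diskType, disk := range dc.GetDiskInfos() {
			counts[diskType] += disk.GetVolumeCount()
		}
	}
	return counts
}

func main() {
	var resp *master_pb.VolumeListResponse // pretend this came from the VolumeList RPC
	fmt.Println(countVolumes(resp))        // prints map[] — no panic on a nil response
}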
+func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{29} } -func (m *TopologyInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount +func (x *LookupEcVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *TopologyInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +type LookupEcVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations,proto3" json:"shard_id_locations,omitempty"` +} + +func (x *LookupEcVolumeResponse) Reset() { + *x = LookupEcVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupEcVolumeResponse) ProtoMessage() {} + +func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeResponse.ProtoReflect.Descriptor instead. +func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{30} +} + +func (x *LookupEcVolumeResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *TopologyInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation { + if x != nil { + return x.ShardIdLocations + } + return nil +} + +type VacuumVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageThreshold float32 `protobuf:"fixed32,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` +} + +func (x *VacuumVolumeRequest) Reset() { + *x = VacuumVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VacuumVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VacuumVolumeRequest) ProtoMessage() {} + +func (x *VacuumVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VacuumVolumeRequest.ProtoReflect.Descriptor instead. 
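For the erasure-coding lookup messages defined above, a typical consumer flattens shard_id_locations into a shard-id-to-server index. A minimal sketch, not from this commit, again assuming the in-tree master_pb import path:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// shardServers maps each erasure-coding shard id to the volume-server URLs
// that hold a copy of that shard.
func shardServers(resp *master_pb.LookupEcVolumeResponse) map[uint32][]string {
	index := make(map[uint32][]string)
	for _, loc := range resp.GetShardIdLocations() {
		for _, l := range loc.GetLocations() {
			index[loc.GetShardId()] = append(index[loc.GetShardId()], l.GetUrl())
		}
	}
	return index
}

func main() {
	resp := &master_pb.LookupEcVolumeResponse{
		VolumeId: 7,
		ShardIdLocations: []*master_pb.LookupEcVolumeResponse_EcShardIdLocation{
			{ShardId: 0, Locations: []*master_pb.Location{{Url: "127.0.0.1:8080"}}},
		},
	}
	fmt.Println(shardServers(resp)) // map[0:[127.0.0.1:8080]]
}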
+func (*VacuumVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{31} +} + +func (x *VacuumVolumeRequest) GetGarbageThreshold() float32 { + if x != nil { + return x.GarbageThreshold } return 0 } -func (m *TopologyInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount +type VacuumVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VacuumVolumeResponse) Reset() { + *x = VacuumVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VacuumVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VacuumVolumeResponse) ProtoMessage() {} + +func (x *VacuumVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VacuumVolumeResponse.ProtoReflect.Descriptor instead. +func (*VacuumVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{32} +} + +type GetMasterConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMasterConfigurationRequest) Reset() { + *x = GetMasterConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMasterConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMasterConfigurationRequest) ProtoMessage() {} + +func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead. 
+func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{33} +} + +type GetMasterConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` + DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` +} + +func (x *GetMasterConfigurationResponse) Reset() { + *x = GetMasterConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMasterConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMasterConfigurationResponse) ProtoMessage() {} + +func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead. 
+func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{34} +} + +func (x *GetMasterConfigurationResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress + } + return "" +} + +func (x *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { + if x != nil { + return x.MetricsIntervalSeconds } return 0 } -func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { - if m != nil { - return m.DataCenterInfos +func (x *GetMasterConfigurationResponse) GetStorageBackends() []*StorageBackend { + if x != nil { + return x.StorageBackends } return nil } -func (m *TopologyInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *GetMasterConfigurationResponse) GetDefaultReplication() string { + if x != nil { + return x.DefaultReplication } - return 0 + return "" } -type VolumeListRequest struct { +func (x *GetMasterConfigurationResponse) GetLeader() string { + if x != nil { + return x.Leader + } + return "" } -func (m *VolumeListRequest) Reset() { *m = VolumeListRequest{} } -func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeListRequest) ProtoMessage() {} -func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +type ListMasterClientsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type VolumeListResponse struct { - TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"` - VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb" json:"volume_size_limit_mb,omitempty"` + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` +} + +func (x *ListMasterClientsRequest) Reset() { + *x = ListMasterClientsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMasterClientsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMasterClientsRequest) ProtoMessage() {} + +func (x *ListMasterClientsRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeListResponse) Reset() { *m = VolumeListResponse{} } -func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeListResponse) ProtoMessage() {} -func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +// Deprecated: Use ListMasterClientsRequest.ProtoReflect.Descriptor instead. 
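Note that GetMasterConfigurationResponse grows in this regeneration: the removed proto-v1 struct carried only metrics_address and metrics_interval_seconds, while the new one also exposes storage_backends, default_replication, and leader. A minimal sketch, not part of this commit, of reading the expanded message:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// describe prints the master configuration, including the fields newly
// surfaced by this regeneration (leader, default_replication, backends).
func describe(resp *master_pb.GetMasterConfigurationResponse) {
	fmt.Printf("leader=%s replication=%s metrics=%s every %ds\n",
		resp.GetLeader(),
		resp.GetDefaultReplication(),
		resp.GetMetricsAddress(),
		resp.GetMetricsIntervalSeconds())
	for _, b := range resp.GetStorageBackends() {
		fmt.Printf("backend %s/%s: %v\n", b.GetType(), b.GetId(), b.GetProperties())
	}
}

func main() {
	describe(&master_pb.GetMasterConfigurationResponse{
		Leader:             "localhost:9333", // illustrative values only
		DefaultReplication: "000",
		MetricsAddress:     "localhost:9090",
	})
}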
+func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{35} +} -func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo { - if m != nil { - return m.TopologyInfo +func (x *ListMasterClientsRequest) GetClientType() string { + if x != nil { + return x.ClientType + } + return "" +} + +type ListMasterClientsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` +} + +func (x *ListMasterClientsResponse) Reset() { + *x = ListMasterClientsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMasterClientsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMasterClientsResponse) ProtoMessage() {} + +func (x *ListMasterClientsResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMasterClientsResponse.ProtoReflect.Descriptor instead. +func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{36} +} + +func (x *ListMasterClientsResponse) GetGrpcAddresses() []string { + if x != nil { + return x.GrpcAddresses } return nil } -func (m *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { - if m != nil { - return m.VolumeSizeLimitMb +type LeaseAdminTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` + ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` +} + +func (x *LeaseAdminTokenRequest) Reset() { + *x = LeaseAdminTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeaseAdminTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeaseAdminTokenRequest) ProtoMessage() {} + +func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead. 
+func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{37} +} + +func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 { + if x != nil { + return x.PreviousToken } return 0 } -type LookupEcVolumeRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *LeaseAdminTokenRequest) GetPreviousLockTime() int64 { + if x != nil { + return x.PreviousLockTime + } + return 0 +} + +func (x *LeaseAdminTokenRequest) GetLockName() string { + if x != nil { + return x.LockName + } + return "" +} + +func (x *LeaseAdminTokenRequest) GetClientName() string { + if x != nil { + return x.ClientName + } + return "" +} + +type LeaseAdminTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"` + LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"` } -func (m *LookupEcVolumeRequest) Reset() { *m = LookupEcVolumeRequest{} } -func (m *LookupEcVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupEcVolumeRequest) ProtoMessage() {} -func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (x *LeaseAdminTokenResponse) Reset() { + *x = LeaseAdminTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeaseAdminTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeaseAdminTokenResponse) ProtoMessage() {} + +func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead. 
+func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{38} +} -func (m *LookupEcVolumeRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *LeaseAdminTokenResponse) GetToken() int64 { + if x != nil { + return x.Token } return 0 } -type LookupEcVolumeResponse struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations" json:"shard_id_locations,omitempty"` +func (x *LeaseAdminTokenResponse) GetLockTsNs() int64 { + if x != nil { + return x.LockTsNs + } + return 0 +} + +type ReleaseAdminTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` +} + +func (x *ReleaseAdminTokenRequest) Reset() { + *x = ReleaseAdminTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseAdminTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseAdminTokenRequest) ProtoMessage() {} + +func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupEcVolumeResponse) Reset() { *m = LookupEcVolumeResponse{} } -func (m *LookupEcVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupEcVolumeResponse) ProtoMessage() {} -func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +// Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead. 
+func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{39} +} -func (m *LookupEcVolumeResponse) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 { + if x != nil { + return x.PreviousToken } return 0 } -func (m *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation { - if m != nil { - return m.ShardIdLocations +func (x *ReleaseAdminTokenRequest) GetPreviousLockTime() int64 { + if x != nil { + return x.PreviousLockTime } - return nil + return 0 } -type LookupEcVolumeResponse_EcShardIdLocation struct { - ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"` +func (x *ReleaseAdminTokenRequest) GetLockName() string { + if x != nil { + return x.LockName + } + return "" } -func (m *LookupEcVolumeResponse_EcShardIdLocation) Reset() { - *m = LookupEcVolumeResponse_EcShardIdLocation{} +type ReleaseAdminTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *LookupEcVolumeResponse_EcShardIdLocation) String() string { return proto.CompactTextString(m) } -func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} -func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{30, 0} + +func (x *ReleaseAdminTokenResponse) Reset() { + *x = ReleaseAdminTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseAdminTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseAdminTokenResponse) ProtoMessage() {} + +func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead. 
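The LeaseAdminToken/ReleaseAdminToken messages above implement a renewable admin lock: a renewal echoes back the previous token and lock timestamp, and a release proves ownership the same way. A minimal sketch of that cycle, not part of this commit; the generated master_pb.NewSeaweedClient constructor, the LeaseAdminToken/ReleaseAdminToken RPC names, the master gRPC address localhost:19333, and the lock name "admin" are all assumptions for illustration:

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := master_pb.NewSeaweedClient(conn) // assumed generated client
	ctx := context.Background()

	// Acquire: previous_token and previous_lock_time are zero on first lease.
	resp, err := client.LeaseAdminToken(ctx, &master_pb.LeaseAdminTokenRequest{
		LockName:   "admin", // hypothetical lock name
		ClientName: "example-shell",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Renew: echo back the token and lock timestamp from the last response.
	resp, err = client.LeaseAdminToken(ctx, &master_pb.LeaseAdminTokenRequest{
		PreviousToken:    resp.GetToken(),
		PreviousLockTime: resp.GetLockTsNs(),
		LockName:         "admin",
		ClientName:       "example-shell",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Release: prove ownership with the current token before giving it up.
	_, _ = client.ReleaseAdminToken(ctx, &master_pb.ReleaseAdminTokenRequest{
		PreviousToken:    resp.GetToken(),
		PreviousLockTime: resp.GetLockTsNs(),
		LockName:         "admin",
	})
}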
+func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{40} +} + +type SuperBlockExtra_ErasureCoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"` + Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"` + VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } -func (m *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { - if m != nil { - return m.ShardId +func (x *SuperBlockExtra_ErasureCoding) Reset() { + *x = SuperBlockExtra_ErasureCoding{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SuperBlockExtra_ErasureCoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} + +func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SuperBlockExtra_ErasureCoding.ProtoReflect.Descriptor instead. +func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *SuperBlockExtra_ErasureCoding) GetData() uint32 { + if x != nil { + return x.Data } return 0 } -func (m *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *SuperBlockExtra_ErasureCoding) GetParity() uint32 { + if x != nil { + return x.Parity + } + return 0 +} + +func (x *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { + if x != nil { + return x.VolumeIds } return nil } -type GetMasterConfigurationRequest struct { +type LookupVolumeResponse_VolumeIdLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` } -func (m *GetMasterConfigurationRequest) Reset() { *m = GetMasterConfigurationRequest{} } -func (m *GetMasterConfigurationRequest) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationRequest) ProtoMessage() {} -func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (x *LookupVolumeResponse_VolumeIdLocation) Reset() { + *x = LookupVolumeResponse_VolumeIdLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type GetMasterConfigurationResponse struct { - MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` +func (x 
*LookupVolumeResponse_VolumeIdLocation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} -func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { - if m != nil { - return m.MetricsAddress +func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeResponse_VolumeIdLocation.ProtoReflect.Descriptor instead. +func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" +} + +func (x *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +func (x *LookupVolumeResponse_VolumeIdLocation) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { - if m != nil { - return m.MetricsIntervalSeconds +type LookupEcVolumeResponse_EcShardIdLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` +} + +func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() { + *x = LookupEcVolumeResponse_EcShardIdLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} + +func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeResponse_EcShardIdLocation.ProtoReflect.Descriptor instead. 
+func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{30, 0} +} + +func (x *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { + if x != nil { + return x.ShardId } return 0 } -func init() { - proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") - proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") - proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage") - proto.RegisterType((*VolumeShortInformationMessage)(nil), "master_pb.VolumeShortInformationMessage") - proto.RegisterType((*VolumeEcShardInformationMessage)(nil), "master_pb.VolumeEcShardInformationMessage") - proto.RegisterType((*StorageBackend)(nil), "master_pb.StorageBackend") - proto.RegisterType((*Empty)(nil), "master_pb.Empty") - proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra") - proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding") - proto.RegisterType((*KeepConnectedRequest)(nil), "master_pb.KeepConnectedRequest") - proto.RegisterType((*VolumeLocation)(nil), "master_pb.VolumeLocation") - proto.RegisterType((*LookupVolumeRequest)(nil), "master_pb.LookupVolumeRequest") - proto.RegisterType((*LookupVolumeResponse)(nil), "master_pb.LookupVolumeResponse") - proto.RegisterType((*LookupVolumeResponse_VolumeIdLocation)(nil), "master_pb.LookupVolumeResponse.VolumeIdLocation") - proto.RegisterType((*Location)(nil), "master_pb.Location") - proto.RegisterType((*AssignRequest)(nil), "master_pb.AssignRequest") - proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse") - proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest") - proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse") - proto.RegisterType((*StorageType)(nil), "master_pb.StorageType") - proto.RegisterType((*Collection)(nil), "master_pb.Collection") - proto.RegisterType((*CollectionListRequest)(nil), "master_pb.CollectionListRequest") - proto.RegisterType((*CollectionListResponse)(nil), "master_pb.CollectionListResponse") - proto.RegisterType((*CollectionDeleteRequest)(nil), "master_pb.CollectionDeleteRequest") - proto.RegisterType((*CollectionDeleteResponse)(nil), "master_pb.CollectionDeleteResponse") - proto.RegisterType((*DataNodeInfo)(nil), "master_pb.DataNodeInfo") - proto.RegisterType((*RackInfo)(nil), "master_pb.RackInfo") - proto.RegisterType((*DataCenterInfo)(nil), "master_pb.DataCenterInfo") - proto.RegisterType((*TopologyInfo)(nil), "master_pb.TopologyInfo") - proto.RegisterType((*VolumeListRequest)(nil), "master_pb.VolumeListRequest") - proto.RegisterType((*VolumeListResponse)(nil), "master_pb.VolumeListResponse") - proto.RegisterType((*LookupEcVolumeRequest)(nil), "master_pb.LookupEcVolumeRequest") - proto.RegisterType((*LookupEcVolumeResponse)(nil), "master_pb.LookupEcVolumeResponse") - proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation") - proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest") - proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse") +func (x *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +var File_master_proto protoreflect.FileDescriptor + +var file_master_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 
0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0xfc, 0x06, 0x0a, 0x09, 0x48, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, + 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x3d, 0x0a, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, + 0x49, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0a, + 0x6e, 0x65, 0x77, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x0f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a, + 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x09, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x08, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0d, + 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x11, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 
0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x0b, 0x6e, 0x65, 0x77, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x56, 0x0a, 0x11, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x12, 0x27, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x65, + 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x55, 0x0a, + 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x2e, 0x4d, + 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, + 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 
0x72, + 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x22, 0x98, 0x04, 0x0a, 0x18, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x64, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, + 0x74, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, + 0x12, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x41, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, + 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, + 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc5, 0x01, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, + 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x92, + 0x01, 0x0a, 0x1f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x63, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x62, + 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x63, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x42, 0x69, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, + 0x79, 0x70, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xbe, 0x01, + 0x0a, 0x0f, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, + 0x61, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 
0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x45, 0x78, 0x74, 0x72, 0x61, 0x2e, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x52, 0x0d, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x1a, 0x5a, 0x0a, 0x0d, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x47, + 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, + 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, + 0x65, 0x77, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, + 0x65, 0x77, 0x56, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0b, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x69, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x60, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3b, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0xd0, 0x02, 0x0a, 0x0d, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, + 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, + 0x6d, 0x62, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x32, 0x0a, 0x15, + 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x57, 0x72, 0x69, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x93, 0x01, + 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 
[several hundred lines of raw descriptor bytes elided: the remainder of file_master_proto_rawDesc encodes the Statistics, Collection, CollectionList, CollectionDelete, DiskInfo, DataNodeInfo, RackInfo, DataCenterInfo, TopologyInfo, VolumeList, LookupEcVolume, VacuumVolume, GetMasterConfiguration, ListMasterClients, LeaseAdminToken, and ReleaseAdminToken messages, the Seaweed service with its fourteen RPCs, and the go_package option github.com/chrislusf/seaweedfs/weed/pb/master_pb]
+}
+
+var (
+	file_master_proto_rawDescOnce sync.Once
+	file_master_proto_rawDescData = file_master_proto_rawDesc
+)
+
+func file_master_proto_rawDescGZIP() []byte {
+	file_master_proto_rawDescOnce.Do(func() {
+		file_master_proto_rawDescData = protoimpl.X.CompressGZIP(file_master_proto_rawDescData)
+	})
+	return file_master_proto_rawDescData
+}
+
+var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 50)
+var file_master_proto_goTypes = []interface{}{
+	(*Heartbeat)(nil),                         // 0: master_pb.Heartbeat
+	(*HeartbeatResponse)(nil),                 // 1: master_pb.HeartbeatResponse
+	(*VolumeInformationMessage)(nil),          // 2: master_pb.VolumeInformationMessage
+	(*VolumeShortInformationMessage)(nil),     // 3: master_pb.VolumeShortInformationMessage
+	(*VolumeEcShardInformationMessage)(nil),   // 4: master_pb.VolumeEcShardInformationMessage
+	(*StorageBackend)(nil),                    // 5: master_pb.StorageBackend
+	(*Empty)(nil),                             // 6: master_pb.Empty
+	(*SuperBlockExtra)(nil),                   // 7: master_pb.SuperBlockExtra
+	(*KeepConnectedRequest)(nil),              // 8: master_pb.KeepConnectedRequest
+	(*VolumeLocation)(nil),                    // 9: master_pb.VolumeLocation
+	(*LookupVolumeRequest)(nil),               // 10: master_pb.LookupVolumeRequest
+	(*LookupVolumeResponse)(nil),              // 11: master_pb.LookupVolumeResponse
+	(*Location)(nil),                          // 12: master_pb.Location
+	(*AssignRequest)(nil),                     // 13: master_pb.AssignRequest
+	(*AssignResponse)(nil),                    // 14: master_pb.AssignResponse
+	(*StatisticsRequest)(nil),                 // 15: master_pb.StatisticsRequest
+	(*StatisticsResponse)(nil),                // 16: master_pb.StatisticsResponse
+	(*Collection)(nil),                        // 17: master_pb.Collection
+	(*CollectionListRequest)(nil),             // 18: master_pb.CollectionListRequest
+	(*CollectionListResponse)(nil),            // 19: master_pb.CollectionListResponse
+	(*CollectionDeleteRequest)(nil),           // 20: master_pb.CollectionDeleteRequest
+	(*CollectionDeleteResponse)(nil),          // 21: master_pb.CollectionDeleteResponse
+	(*DiskInfo)(nil),                          // 22: master_pb.DiskInfo
+	(*DataNodeInfo)(nil),                      // 23: master_pb.DataNodeInfo
+	(*RackInfo)(nil),                          // 24: master_pb.RackInfo
+	(*DataCenterInfo)(nil),                    // 25: master_pb.DataCenterInfo
+	(*TopologyInfo)(nil),                      // 26: master_pb.TopologyInfo
+	(*VolumeListRequest)(nil),                 // 27: master_pb.VolumeListRequest
+	(*VolumeListResponse)(nil),                // 28: master_pb.VolumeListResponse
+	(*LookupEcVolumeRequest)(nil),             // 29: master_pb.LookupEcVolumeRequest
+	(*LookupEcVolumeResponse)(nil),            // 30: master_pb.LookupEcVolumeResponse
+	(*VacuumVolumeRequest)(nil),               // 31: master_pb.VacuumVolumeRequest
+	(*VacuumVolumeResponse)(nil),              // 32: master_pb.VacuumVolumeResponse
+	(*GetMasterConfigurationRequest)(nil),     // 33: master_pb.GetMasterConfigurationRequest
+	(*GetMasterConfigurationResponse)(nil),    // 34: master_pb.GetMasterConfigurationResponse
+	(*ListMasterClientsRequest)(nil),          // 35: master_pb.ListMasterClientsRequest
+	(*ListMasterClientsResponse)(nil),         // 36: master_pb.ListMasterClientsResponse
+	(*LeaseAdminTokenRequest)(nil),            // 37: master_pb.LeaseAdminTokenRequest
+	(*LeaseAdminTokenResponse)(nil),           // 38: master_pb.LeaseAdminTokenResponse
+	(*ReleaseAdminTokenRequest)(nil),          // 39: master_pb.ReleaseAdminTokenRequest
+	(*ReleaseAdminTokenResponse)(nil),         // 40: master_pb.ReleaseAdminTokenResponse
+	nil,                                       // 41: master_pb.Heartbeat.MaxVolumeCountsEntry
+	nil,                                       // 42: master_pb.StorageBackend.PropertiesEntry
+	(*SuperBlockExtra_ErasureCoding)(nil),     // 43: master_pb.SuperBlockExtra.ErasureCoding
+	(*LookupVolumeResponse_VolumeIdLocation)(nil), // 44: master_pb.LookupVolumeResponse.VolumeIdLocation
+	nil,                                       // 45: master_pb.DataNodeInfo.DiskInfosEntry
+	nil,                                       // 46: master_pb.RackInfo.DiskInfosEntry
+	nil,                                       // 47: master_pb.DataCenterInfo.DiskInfosEntry
+	nil,                                       // 48: master_pb.TopologyInfo.DiskInfosEntry
+	(*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 49: master_pb.LookupEcVolumeResponse.EcShardIdLocation
+}
+var file_master_proto_depIdxs = []int32{
+	2,  // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage
+	3,  // 1: master_pb.Heartbeat.new_volumes:type_name -> master_pb.VolumeShortInformationMessage
+	3,  // 2: master_pb.Heartbeat.deleted_volumes:type_name -> master_pb.VolumeShortInformationMessage
+	4,  // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+	4,  // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+	4,  // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage
+	41, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry
+	5,  // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend
+	42, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry
+	43, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding
+	44, // 10: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation
+	17, // 11: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection
+	2,  // 12: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage
+	4,  // 13: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage
+	45, // 14: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry
+	23, // 15: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo
+	46, // 16: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry
+	24, // 17: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo
+	47, // 18: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry
+	25, // 19: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo
+	48, // 20: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry
+	26, // 21: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo
+	49, // 22: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation
+	5,  // 23: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend
+	12, // 24: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location
+	22, // 25: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+	22, // 26: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+	22, // 27: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+	22, // 28: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo
+	12, // 29: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location
+	0,  // 30: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat
+	8,  // 31: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest
+	10, // 32: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest
+	13, // 33: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest
+	15, // 34: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest
+	18, // 35: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest
+	20, // 36: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest
+	27, // 37: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest
+	29, // 38: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest
+	31, // 39: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest
+	33, // 40: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest
+	35, // 41: master_pb.Seaweed.ListMasterClients:input_type -> master_pb.ListMasterClientsRequest
+	37, // 42: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest
+	39, // 43: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest
+	1,  // 44: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse
+	9,  // 45: master_pb.Seaweed.KeepConnected:output_type -> master_pb.VolumeLocation
+	11, // 46: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse
+	14, // 47: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse
+	16, // 48: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse
+	19, // 49: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse
+	21, // 50: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse
+	28, // 51: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse
+	30, // 52: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse
+	32, // 53: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse
+	34, // 54: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse
+	36, // 55: master_pb.Seaweed.ListMasterClients:output_type -> master_pb.ListMasterClientsResponse
+	38, // 56: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse
+	40, // 57: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse
+	44, // [44:58] is the sub-list for method output_type
+	30, // [30:44] is the sub-list for method input_type
+	30, // [30:30] is the sub-list for extension type_name
+	30, // [30:30] is the sub-list for extension extendee
+	0,  // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_master_proto_init() }
+func file_master_proto_init() {
+	if File_master_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_master_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Heartbeat); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_master_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v :=
v.(*HeartbeatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeShortInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageBackend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataNodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RackInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataCenterInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*LookupEcVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMasterClientsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMasterClientsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra_ErasureCoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_master_proto_rawDesc, + NumEnums: 0, + NumMessages: 50, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_master_proto_goTypes, + DependencyIndexes: file_master_proto_depIdxs, + MessageInfos: file_master_proto_msgTypes, + }.Build() + File_master_proto = out.File + file_master_proto_rawDesc = nil + file_master_proto_goTypes = nil + file_master_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Seaweed service +const _ = grpc.SupportPackageIsVersion6 +// SeaweedClient is the client API for Seaweed service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SeaweedClient interface { SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) @@ -1490,19 +4093,23 @@ type SeaweedClient interface { CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) + VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) + ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) + LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) + ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) } type seaweedClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSeaweedClient(cc *grpc.ClientConn) SeaweedClient { +func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient { return &seaweedClient{cc} } func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[0], c.cc, "/master_pb.Seaweed/SendHeartbeat", opts...) + stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[0], "/master_pb.Seaweed/SendHeartbeat", opts...) 
if err != nil { return nil, err } @@ -1533,7 +4140,7 @@ func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) { } func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[1], c.cc, "/master_pb.Seaweed/KeepConnected", opts...) + stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[1], "/master_pb.Seaweed/KeepConnected", opts...) if err != nil { return nil, err } @@ -1565,7 +4172,7 @@ func (x *seaweedKeepConnectedClient) Recv() (*VolumeLocation, error) { func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { out := new(LookupVolumeResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, opts...) if err != nil { return nil, err } @@ -1574,7 +4181,7 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) { out := new(AssignResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, opts...) if err != nil { return nil, err } @@ -1583,7 +4190,7 @@ func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...g func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { out := new(StatisticsResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, opts...) if err != nil { return nil, err } @@ -1592,7 +4199,7 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { out := new(CollectionListResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, opts...) if err != nil { return nil, err } @@ -1601,7 +4208,7 @@ func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRe func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) { out := new(CollectionDeleteResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, opts...) if err != nil { return nil, err } @@ -1610,7 +4217,7 @@ func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDele func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) { out := new(VolumeListResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, opts...) 
if err != nil { return nil, err } @@ -1619,7 +4226,16 @@ func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, o func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) { out := new(LookupEcVolumeResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) { + out := new(VacuumVolumeResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VacuumVolume", in, out, opts...) if err != nil { return nil, err } @@ -1628,15 +4244,41 @@ func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRe func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) { out := new(GetMasterConfigurationResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) { + out := new(ListMasterClientsResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) { + out := new(LeaseAdminTokenResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for Seaweed service +func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) { + out := new(ReleaseAdminTokenResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} +// SeaweedServer is the server API for Seaweed service. type SeaweedServer interface { SendHeartbeat(Seaweed_SendHeartbeatServer) error KeepConnected(Seaweed_KeepConnectedServer) error @@ -1647,7 +4289,58 @@ type SeaweedServer interface { CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) + VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) + ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) + LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) + ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) +} + +// UnimplementedSeaweedServer can be embedded to have forward compatible implementations. 
+type UnimplementedSeaweedServer struct { +} + +func (*UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error { + return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented") +} +func (*UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") +} +func (*UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented") +} +func (*UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented") +} +func (*UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") +} +func (*UnimplementedSeaweedServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented") +} +func (*UnimplementedSeaweedServer) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionDelete not implemented") +} +func (*UnimplementedSeaweedServer) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeList not implemented") +} +func (*UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupEcVolume not implemented") +} +func (*UnimplementedSeaweedServer) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolume not implemented") +} +func (*UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented") +} +func (*UnimplementedSeaweedServer) ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMasterClients not implemented") +} +func (*UnimplementedSeaweedServer) LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseAdminToken not implemented") +} +func (*UnimplementedSeaweedServer) ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseAdminToken not implemented") } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -1832,6 +4525,24 @@ func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VacuumVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SeaweedServer).VacuumVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/VacuumVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).VacuumVolume(ctx, req.(*VacuumVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetMasterConfigurationRequest) if err := dec(in); err != nil { @@ -1850,6 +4561,60 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMasterClientsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ListMasterClients(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ListMasterClients", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseAdminTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).LeaseAdminToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/LeaseAdminToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseAdminTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ReleaseAdminToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ReleaseAdminToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -1883,9 +4648,25 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ Handler: _Seaweed_LookupEcVolume_Handler, }, { + MethodName: "VacuumVolume", + Handler: _Seaweed_VacuumVolume_Handler, + }, + { MethodName: "GetMasterConfiguration", Handler: _Seaweed_GetMasterConfiguration_Handler, }, + { + MethodName: "ListMasterClients", + Handler: _Seaweed_ListMasterClients_Handler, + }, + { + MethodName: "LeaseAdminToken", + Handler: _Seaweed_LeaseAdminToken_Handler, + }, + { + MethodName: "ReleaseAdminToken", + Handler: _Seaweed_ReleaseAdminToken_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1903,141 +4684,3 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ }, Metadata: 
"master.proto", } - -func init() { proto.RegisterFile("master.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2102 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7, - 0x11, 0xd6, 0xec, 0x2e, 0x97, 0xbb, 0xb5, 0xef, 0x26, 0x45, 0xaf, 0xd6, 0x96, 0xb5, 0x1a, 0x07, - 0x30, 0xa5, 0x38, 0x8c, 0x43, 0x1b, 0x88, 0x91, 0xc4, 0x30, 0x24, 0x8a, 0x76, 0x08, 0x89, 0xb4, - 0x34, 0x54, 0x64, 0x20, 0x40, 0x30, 0xe9, 0x9d, 0x69, 0x92, 0x03, 0xce, 0x2b, 0xd3, 0xbd, 0x14, - 0xd7, 0xb9, 0x04, 0xc8, 0x31, 0xa7, 0x20, 0x87, 0xfc, 0x85, 0x5c, 0x72, 0x4a, 0xce, 0xbe, 0xe4, - 0x47, 0xe4, 0x7f, 0xe4, 0xea, 0x4b, 0xd0, 0xaf, 0x99, 0x9e, 0x7d, 0x90, 0xa6, 0x01, 0x1f, 0x74, - 0x9b, 0xae, 0xaa, 0xae, 0xae, 0xfe, 0xaa, 0xbb, 0xea, 0xeb, 0x5d, 0x68, 0x47, 0x98, 0x32, 0x92, - 0xed, 0xa4, 0x59, 0xc2, 0x12, 0xd4, 0x94, 0x23, 0x37, 0x9d, 0xd8, 0x7f, 0xa9, 0x43, 0xf3, 0xd7, - 0x04, 0x67, 0x6c, 0x42, 0x30, 0x43, 0x5d, 0xa8, 0x04, 0xe9, 0xd0, 0x1a, 0x5b, 0xdb, 0x4d, 0xa7, - 0x12, 0xa4, 0x08, 0x41, 0x2d, 0x4d, 0x32, 0x36, 0xac, 0x8c, 0xad, 0xed, 0x8e, 0x23, 0xbe, 0xd1, - 0x5d, 0x80, 0x74, 0x3a, 0x09, 0x03, 0xcf, 0x9d, 0x66, 0xe1, 0xb0, 0x2a, 0x6c, 0x9b, 0x52, 0xf2, - 0x9b, 0x2c, 0x44, 0xdb, 0xd0, 0x8f, 0xf0, 0xa5, 0x7b, 0x91, 0x84, 0xd3, 0x88, 0xb8, 0x5e, 0x32, - 0x8d, 0xd9, 0xb0, 0x26, 0xa6, 0x77, 0x23, 0x7c, 0xf9, 0x4a, 0x88, 0xf7, 0xb8, 0x14, 0x8d, 0x79, - 0x54, 0x97, 0xee, 0x49, 0x10, 0x12, 0xf7, 0x9c, 0xcc, 0x86, 0x6b, 0x63, 0x6b, 0xbb, 0xe6, 0x40, - 0x84, 0x2f, 0x3f, 0x0f, 0x42, 0xf2, 0x94, 0xcc, 0xd0, 0x3d, 0x68, 0xf9, 0x98, 0x61, 0xd7, 0x23, - 0x31, 0x23, 0xd9, 0xb0, 0x2e, 0xd6, 0x02, 0x2e, 0xda, 0x13, 0x12, 0x1e, 0x5f, 0x86, 0xbd, 0xf3, - 0xe1, 0xba, 0xd0, 0x88, 0x6f, 0x1e, 0x1f, 0xf6, 0xa3, 0x20, 0x76, 0x45, 0xe4, 0x0d, 0xb1, 0x74, - 0x53, 0x48, 0x9e, 0xf3, 0xf0, 0x3f, 0x85, 0x75, 0x19, 0x1b, 0x1d, 0x36, 0xc7, 0xd5, 0xed, 0xd6, - 0xee, 0x7b, 0x3b, 0x39, 0x1a, 0x3b, 0x32, 0xbc, 0x83, 0xf8, 0x24, 0xc9, 0x22, 0xcc, 0x82, 0x24, - 0x3e, 0x24, 0x94, 0xe2, 0x53, 0xe2, 0xe8, 0x39, 0xe8, 0x00, 0x5a, 0x31, 0x79, 0xed, 0x6a, 0x17, - 0x20, 0x5c, 0x6c, 0x2f, 0xb8, 0x38, 0x3e, 0x4b, 0x32, 0xb6, 0xc4, 0x0f, 0xc4, 0xe4, 0xf5, 0x2b, - 0xe5, 0xea, 0x05, 0xf4, 0x7c, 0x12, 0x12, 0x46, 0xfc, 0xdc, 0x5d, 0xeb, 0x86, 0xee, 0xba, 0xca, - 0x81, 0x76, 0xf9, 0x23, 0xe8, 0x9e, 0x61, 0xea, 0xc6, 0x49, 0xee, 0xb1, 0x3d, 0xb6, 0xb6, 0x1b, - 0x4e, 0xfb, 0x0c, 0xd3, 0xa3, 0x44, 0x5b, 0x7d, 0x01, 0x4d, 0xe2, 0xb9, 0xf4, 0x0c, 0x67, 0x3e, - 0x1d, 0xf6, 0xc5, 0x92, 0x0f, 0x17, 0x96, 0xdc, 0xf7, 0x8e, 0xb9, 0xc1, 0x92, 0x45, 0x1b, 0x44, - 0xaa, 0x28, 0x3a, 0x82, 0x0e, 0x07, 0xa3, 0x70, 0x36, 0xb8, 0xb1, 0x33, 0x8e, 0xe6, 0xbe, 0xf6, - 0xf7, 0x0a, 0x06, 0x1a, 0x91, 0xc2, 0x27, 0xba, 0xb1, 0x4f, 0x0d, 0x6b, 0xee, 0xf7, 0x7d, 0xe8, - 0x2b, 0x58, 0x0a, 0xb7, 0x1b, 0x02, 0x98, 0x8e, 0x00, 0x46, 0x1b, 0xda, 0x7f, 0xaa, 0xc0, 0x20, - 0xbf, 0x0d, 0x0e, 0xa1, 0x69, 0x12, 0x53, 0x82, 0x1e, 0xc2, 0x40, 0x1d, 0x67, 0x1a, 0x7c, 0x4d, - 0xdc, 0x30, 0x88, 0x02, 0x26, 0x2e, 0x49, 0xcd, 0xe9, 0x49, 0xc5, 0x71, 0xf0, 0x35, 0x79, 0xc6, - 0xc5, 0x68, 0x0b, 0xea, 0x21, 0xc1, 0x3e, 0xc9, 0xc4, 0x9d, 0x69, 0x3a, 0x6a, 0x84, 0xde, 0x87, - 0x5e, 0x44, 0x58, 0x16, 0x78, 0xd4, 0xc5, 0xbe, 0x9f, 0x11, 0x4a, 0xd5, 0xd5, 0xe9, 0x2a, 0xf1, - 0x23, 0x29, 0x45, 0x9f, 0xc0, 0x50, 0x1b, 0x06, 0xfc, 0x8c, 0x5f, 0xe0, 0xd0, 0xa5, 0xc4, 0x4b, - 0x62, 0x9f, 0xaa, 0x7b, 0xb4, 0xa5, 0xf4, 0x07, 0x4a, 0x7d, 0x2c, 0xb5, 0xe8, 0x09, 0xf4, 0x29, - 0x4b, 0x32, 0x7c, 0x4a, 0xdc, 0x09, 0xf6, 0xce, 
0x09, 0x9f, 0xb1, 0x26, 0xc0, 0xbb, 0x63, 0x80, - 0x77, 0x2c, 0x4d, 0x1e, 0x4b, 0x0b, 0xa7, 0x47, 0x4b, 0x63, 0x6a, 0x7f, 0x5b, 0x85, 0xe1, 0xaa, - 0x6b, 0x20, 0xea, 0x83, 0x2f, 0xb6, 0xde, 0x71, 0x2a, 0x81, 0xcf, 0xef, 0x1f, 0x87, 0x44, 0xec, - 0xb5, 0xe6, 0x88, 0x6f, 0xf4, 0x2e, 0x80, 0x97, 0x84, 0x21, 0xf1, 0xf8, 0x44, 0xb5, 0x49, 0x43, - 0xc2, 0xef, 0xa7, 0xb8, 0xf2, 0x45, 0x69, 0xa8, 0x39, 0x4d, 0x2e, 0x91, 0x55, 0xe1, 0x3e, 0xb4, - 0x65, 0xfa, 0x94, 0x81, 0xac, 0x0a, 0x2d, 0x29, 0x93, 0x26, 0x1f, 0x00, 0xd2, 0xc7, 0x64, 0x32, - 0xcb, 0x0d, 0xeb, 0xc2, 0xb0, 0xaf, 0x34, 0x8f, 0x67, 0xda, 0xfa, 0x6d, 0x68, 0x66, 0x04, 0xfb, - 0x6e, 0x12, 0x87, 0x33, 0x51, 0x28, 0x1a, 0x4e, 0x83, 0x0b, 0xbe, 0x8c, 0xc3, 0x19, 0xfa, 0x31, - 0x0c, 0x32, 0x92, 0x86, 0x81, 0x87, 0xdd, 0x34, 0xc4, 0x1e, 0x89, 0x48, 0xac, 0x6b, 0x46, 0x5f, - 0x29, 0x9e, 0x6b, 0x39, 0x1a, 0xc2, 0xfa, 0x05, 0xc9, 0x28, 0xdf, 0x56, 0x53, 0x98, 0xe8, 0x21, - 0xea, 0x43, 0x95, 0xb1, 0x70, 0x08, 0x42, 0xca, 0x3f, 0xd1, 0x03, 0xe8, 0x7b, 0x49, 0x94, 0x62, - 0x8f, 0xb9, 0x19, 0xb9, 0x08, 0xc4, 0xa4, 0x96, 0x50, 0xf7, 0x94, 0xdc, 0x51, 0x62, 0xbe, 0x9d, - 0x28, 0xf1, 0x83, 0x93, 0x80, 0xf8, 0x2e, 0x66, 0x2a, 0xd9, 0xe2, 0xe2, 0x56, 0x9d, 0xbe, 0xd6, - 0x3c, 0x62, 0x32, 0xcd, 0x68, 0x07, 0x36, 0x32, 0x12, 0x25, 0x8c, 0xb8, 0x3a, 0xd9, 0x31, 0x8e, - 0xc8, 0xb0, 0x23, 0x70, 0x1e, 0x48, 0x95, 0xca, 0xf1, 0x11, 0x8e, 0x08, 0xf7, 0x3e, 0x67, 0xcf, - 0x6b, 0x6d, 0x57, 0x98, 0xf7, 0x4b, 0xe6, 0x4f, 0xc9, 0xcc, 0xfe, 0x87, 0x05, 0x77, 0xaf, 0x2c, - 0x39, 0x0b, 0x47, 0xe0, 0xba, 0x74, 0xff, 0x50, 0x08, 0xdb, 0x53, 0xb8, 0x77, 0x4d, 0x21, 0xb8, - 0x26, 0xd6, 0xca, 0x42, 0xac, 0x36, 0x74, 0x88, 0xe7, 0x06, 0xb1, 0x4f, 0x2e, 0xdd, 0x49, 0xc0, - 0xe4, 0x15, 0xed, 0x38, 0x2d, 0xe2, 0x1d, 0x70, 0xd9, 0xe3, 0x80, 0x51, 0xfb, 0x1b, 0x0b, 0xba, - 0xe5, 0x3b, 0xc4, 0x6f, 0x01, 0x9b, 0xa5, 0x44, 0xf5, 0x4d, 0xf1, 0xad, 0x96, 0xae, 0xa8, 0x4e, - 0xea, 0xa3, 0x03, 0x80, 0x34, 0x4b, 0x52, 0x92, 0xb1, 0x80, 0x70, 0xbf, 0xfc, 0x5a, 0x3e, 0x58, - 0x79, 0x2d, 0x77, 0x9e, 0xe7, 0xb6, 0xfb, 0x31, 0xcb, 0x66, 0x8e, 0x31, 0x79, 0xf4, 0x29, 0xf4, - 0xe6, 0xd4, 0x1c, 0x1d, 0x9e, 0x55, 0x19, 0x00, 0xff, 0x44, 0x9b, 0xb0, 0x76, 0x81, 0xc3, 0x29, - 0x51, 0x21, 0xc8, 0xc1, 0x2f, 0x2a, 0x9f, 0x58, 0xf6, 0x3a, 0xac, 0xed, 0x47, 0x29, 0x9b, 0xf1, - 0x9d, 0xf4, 0x8e, 0xa7, 0x29, 0xc9, 0x1e, 0x87, 0x89, 0x77, 0xbe, 0x7f, 0xc9, 0x32, 0x8c, 0xbe, - 0x84, 0x2e, 0xc9, 0x30, 0x9d, 0x66, 0xfc, 0x56, 0xf9, 0x41, 0x7c, 0x2a, 0x7c, 0x96, 0x5b, 0xd2, - 0xdc, 0x9c, 0x9d, 0x7d, 0x39, 0x61, 0x4f, 0xd8, 0x3b, 0x1d, 0x62, 0x0e, 0x47, 0xbf, 0x85, 0x4e, - 0x49, 0xcf, 0xc1, 0xe2, 0x0d, 0x5c, 0x65, 0x45, 0x7c, 0xf3, 0xa2, 0x99, 0xe2, 0x2c, 0x60, 0x33, - 0x45, 0x34, 0xd4, 0x88, 0x97, 0x0a, 0x55, 0x78, 0x03, 0x5f, 0x82, 0xd6, 0x71, 0x9a, 0x52, 0x72, - 0xe0, 0x53, 0xfb, 0x21, 0x6c, 0x3e, 0x25, 0x24, 0xdd, 0x4b, 0xe2, 0x98, 0x78, 0x8c, 0xf8, 0x0e, - 0xf9, 0xc3, 0x94, 0x50, 0xc6, 0x97, 0x10, 0x77, 0x42, 0xe5, 0x83, 0x7f, 0xdb, 0x7f, 0xb7, 0xa0, - 0x2b, 0x8f, 0xcb, 0xb3, 0xc4, 0x13, 0x87, 0x84, 0x83, 0xc6, 0x19, 0x8c, 0x02, 0x6d, 0x9a, 0x85, - 0x73, 0xd4, 0xa6, 0x32, 0x4f, 0x6d, 0xee, 0x40, 0x43, 0xf4, 0xfe, 0x22, 0x98, 0x75, 0xde, 0xce, - 0x03, 0x9f, 0x16, 0x55, 0xcb, 0x97, 0xea, 0x9a, 0x50, 0xb7, 0x74, 0x7b, 0xe6, 0x26, 0x45, 0x67, - 0x58, 0x33, 0x3b, 0x83, 0xfd, 0x12, 0x36, 0x9e, 0x25, 0xc9, 0xf9, 0x34, 0x95, 0xe1, 0xe9, 0x4d, - 0x94, 0xf7, 0x6e, 0x8d, 0xab, 0x3c, 0x96, 0x7c, 0xef, 0xd7, 0x1d, 0x65, 0xfb, 0x7f, 0x16, 0x6c, - 0x96, 0xdd, 0xaa, 0x66, 0xf6, 0x7b, 0xd8, 0xc8, 0xfd, 0xba, 0xa1, 0xc2, 
0x42, 0x2e, 0xd0, 0xda, - 0xfd, 0xd0, 0x48, 0xf3, 0xb2, 0xd9, 0x9a, 0x20, 0xf9, 0x1a, 0x44, 0x67, 0x70, 0x31, 0x27, 0xa1, - 0xa3, 0x4b, 0xe8, 0xcf, 0x9b, 0xf1, 0x22, 0x9c, 0xaf, 0xaa, 0x10, 0x6f, 0xe8, 0x99, 0xe8, 0x67, - 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4a, 0x81, 0xa8, 0xb5, 0x0a, 0x2b, 0x7e, 0xbc, 0x49, - 0x96, 0x25, 0x99, 0x2a, 0x38, 0x72, 0x60, 0xff, 0x12, 0x1a, 0xdf, 0x3b, 0xbb, 0xf6, 0xbf, 0x2a, - 0xd0, 0x79, 0x44, 0x69, 0x70, 0x1a, 0xeb, 0x14, 0x6c, 0xc2, 0x9a, 0x6c, 0x2d, 0xb2, 0xd7, 0xcb, - 0x01, 0x1a, 0x43, 0x4b, 0xd5, 0x2d, 0x03, 0x7a, 0x53, 0x74, 0x6d, 0x49, 0x54, 0xb5, 0xac, 0x26, - 0x43, 0xe3, 0xdd, 0x62, 0x8e, 0xe8, 0xae, 0xad, 0x24, 0xba, 0x75, 0x83, 0xe8, 0xbe, 0x0d, 0x4d, - 0x31, 0x29, 0x4e, 0x7c, 0xa2, 0x18, 0x70, 0x83, 0x0b, 0x8e, 0x12, 0x9f, 0xa0, 0x5d, 0xd8, 0x8a, - 0x48, 0x94, 0x64, 0x33, 0x37, 0xc2, 0xa9, 0xcb, 0x79, 0xb6, 0xe0, 0x2e, 0xd1, 0x44, 0xd5, 0x5e, - 0x24, 0xb5, 0x87, 0x38, 0x3d, 0xc4, 0x97, 0x9c, 0xbe, 0x1c, 0x4e, 0xd0, 0x2e, 0xdc, 0xfe, 0x2a, - 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0xcc, 0xdf, 0x65, 0x2d, 0xde, 0xd0, 0x4a, 0x83, 0xc4, 0xdb, 0x7f, - 0xb3, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x0f, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x53, 0xe7, 0xa2, - 0xb2, 0x2a, 0x17, 0x0b, 0x8f, 0x88, 0x1c, 0xf9, 0x9a, 0x89, 0x7c, 0x9e, 0xf4, 0x35, 0x23, 0xe9, - 0x1c, 0x1a, 0x3c, 0x65, 0x67, 0x1a, 0x1a, 0xfe, 0x6d, 0x9f, 0xc2, 0xe0, 0x98, 0x61, 0x16, 0x50, - 0x16, 0x78, 0x54, 0xa7, 0x73, 0x2e, 0x71, 0xd6, 0x75, 0x89, 0xab, 0xac, 0x4a, 0x5c, 0x35, 0x4f, - 0x9c, 0xfd, 0x1f, 0x0b, 0x90, 0xb9, 0x92, 0x82, 0xe0, 0x07, 0x58, 0x8a, 0x43, 0xc6, 0x12, 0xc6, - 0xd9, 0x20, 0x67, 0x5c, 0x8a, 0x37, 0x09, 0x09, 0x4f, 0x1f, 0x3f, 0x0d, 0x53, 0x4a, 0x7c, 0xa9, - 0x95, 0xa4, 0xa9, 0xc1, 0x05, 0x42, 0x59, 0xe6, 0x5c, 0xf5, 0x39, 0xce, 0x65, 0x3f, 0x82, 0x96, - 0xea, 0x3f, 0x2f, 0x79, 0xef, 0xba, 0x3e, 0x7a, 0x15, 0x5d, 0xa5, 0x00, 0x62, 0x0c, 0xb0, 0x57, - 0x44, 0xbf, 0xac, 0x02, 0xff, 0x11, 0x6e, 0x17, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e, 0x86, - 0xad, 0x20, 0xf6, 0xc2, 0xa9, 0x4f, 0xdc, 0x98, 0x77, 0xf0, 0x30, 0x7f, 0xbc, 0x58, 0x82, 0xad, - 0x6d, 0x2a, 0xed, 0x91, 0x50, 0xea, 0x47, 0xcc, 0x07, 0x80, 0xf4, 0x2c, 0xe2, 0xe5, 0x33, 0x2a, - 0x62, 0x46, 0x5f, 0x69, 0xf6, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xad, 0xf9, 0xc5, 0x55, 0xaa, 0x7e, - 0x0e, 0xad, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1b, 0xe5, 0xa7, 0x98, 0xe7, 0x98, 0x96, 0xf6, 0x4f, - 0xe0, 0xad, 0x42, 0xf5, 0x44, 0x14, 0xfa, 0xab, 0x1a, 0xd0, 0x08, 0x86, 0x8b, 0xe6, 0x32, 0x06, - 0xfb, 0xaf, 0x55, 0x68, 0x3f, 0x51, 0x37, 0x97, 0xd3, 0x18, 0x83, 0xb8, 0x48, 0xf6, 0x70, 0x1f, - 0xda, 0xa5, 0x0b, 0x29, 0xf9, 0x76, 0xeb, 0xc2, 0x78, 0x4d, 0x2f, 0x7b, 0x77, 0x57, 0x85, 0xd9, - 0xfc, 0xbb, 0xfb, 0x21, 0x0c, 0x4e, 0x32, 0x42, 0x16, 0x9f, 0xe8, 0x35, 0xa7, 0xc7, 0x15, 0xa6, - 0xed, 0x0e, 0x6c, 0x60, 0x8f, 0x05, 0x17, 0x73, 0xd6, 0xf2, 0x7c, 0x0d, 0xa4, 0xca, 0xb4, 0xff, - 0x3c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0x3a, 0xac, 0x7f, 0xf7, 0x27, 0xb6, 0xda, 0x0d, 0xd7, 0x50, - 0xf4, 0x1c, 0xba, 0xfa, 0xa9, 0xa6, 0x3c, 0xad, 0xdf, 0xf8, 0x19, 0xd8, 0x26, 0x85, 0x8a, 0x1a, - 0xbc, 0xb9, 0xb4, 0x93, 0x86, 0xdc, 0x89, 0x54, 0x99, 0x85, 0xed, 0xdf, 0x15, 0x68, 0x38, 0xd8, - 0x3b, 0x7f, 0xb3, 0xf3, 0xf1, 0x19, 0xf4, 0xf2, 0x1e, 0x51, 0x4a, 0xc9, 0x5b, 0x06, 0x90, 0xe6, - 0xd1, 0x73, 0x3a, 0xbe, 0x31, 0x5a, 0x09, 0xdb, 0xfa, 0x2a, 0xd8, 0xfe, 0x59, 0x81, 0xee, 0x93, - 0xbc, 0x6f, 0xbd, 0xd9, 0xe0, 0xed, 0x02, 0xf0, 0x46, 0x5b, 0xc2, 0xcd, 0x24, 0x26, 0xfa, 0x78, - 0x38, 0xcd, 0x4c, 0x7d, 0xdd, 0x1c, 0xaf, 0x6f, 0x2a, 0xd0, 0x7e, 0x99, 0xa4, 0x49, 0x98, 0x9c, - 
0xce, 0xde, 0x6c, 0xb4, 0xf6, 0x61, 0x60, 0x70, 0x98, 0x12, 0x68, 0x77, 0xe6, 0x0e, 0x5b, 0x71, - 0x38, 0x9c, 0x9e, 0x5f, 0x1a, 0xdf, 0x1c, 0xc0, 0x0d, 0x18, 0x28, 0x5e, 0x5f, 0xb4, 0x14, 0xfb, - 0xcf, 0x16, 0x20, 0x53, 0xaa, 0x6a, 0xfd, 0xaf, 0xa0, 0xc3, 0x14, 0xd6, 0x22, 0x3e, 0xf5, 0xb8, - 0x31, 0xef, 0x82, 0x99, 0x0b, 0xa7, 0xcd, 0xcc, 0xcc, 0xfc, 0x14, 0x36, 0x17, 0x7e, 0x06, 0xe2, - 0x84, 0x4a, 0x66, 0x64, 0x30, 0xf7, 0x4b, 0xd0, 0xe1, 0xc4, 0xfe, 0x18, 0x6e, 0x4b, 0x12, 0xad, - 0xfb, 0x90, 0xee, 0x0f, 0x0b, 0x6c, 0xb8, 0x53, 0xb0, 0x61, 0xfb, 0x5b, 0x0b, 0xb6, 0xe6, 0xa7, - 0xa9, 0xf8, 0xaf, 0x9a, 0x87, 0x30, 0x20, 0x55, 0x2f, 0x4d, 0x5e, 0x2f, 0xe9, 0xf4, 0x47, 0x0b, - 0xbc, 0x7e, 0xde, 0xf7, 0x8e, 0xae, 0xa3, 0x05, 0xb5, 0xef, 0xd3, 0xb2, 0x80, 0x8e, 0x30, 0x0c, - 0x16, 0xcc, 0xf8, 0xab, 0x48, 0xaf, 0xab, 0x62, 0x5a, 0x57, 0x13, 0xbf, 0x07, 0xb1, 0xb7, 0xef, - 0xc1, 0xdd, 0x2f, 0x08, 0x3b, 0x14, 0x36, 0x7b, 0x49, 0x7c, 0x12, 0x9c, 0x4e, 0x33, 0x69, 0x54, - 0xa4, 0xf6, 0xdd, 0x55, 0x16, 0x0a, 0xa6, 0x25, 0xbf, 0xb5, 0x59, 0x37, 0xfe, 0xad, 0xad, 0x72, - 0xd5, 0x6f, 0x6d, 0xbb, 0xff, 0xad, 0xc3, 0xfa, 0x31, 0xc1, 0xaf, 0x09, 0xe1, 0x4f, 0xfb, 0xce, - 0x31, 0x89, 0xfd, 0xe2, 0x57, 0xf4, 0x4d, 0x63, 0x8f, 0xb9, 0x74, 0xf4, 0xce, 0x32, 0x69, 0x4e, - 0x01, 0x6e, 0x6d, 0x5b, 0x1f, 0x5a, 0xe8, 0x05, 0x74, 0x4a, 0x2f, 0x5a, 0x74, 0xcf, 0x98, 0xb4, - 0xec, 0xad, 0x3b, 0xba, 0xb3, 0xd0, 0x10, 0x35, 0xaa, 0xb9, 0xcb, 0xb6, 0xf9, 0x92, 0x43, 0xef, - 0xae, 0x7c, 0xe2, 0x49, 0x87, 0xf7, 0xae, 0x79, 0x02, 0xda, 0xb7, 0xd0, 0x67, 0x50, 0x97, 0x94, - 0x1f, 0x0d, 0x0d, 0xe3, 0xd2, 0xdb, 0xa9, 0x14, 0x57, 0xf9, 0x7d, 0x60, 0xdf, 0x42, 0x4f, 0x01, - 0x0a, 0xd2, 0x8c, 0xde, 0x29, 0xfd, 0x0c, 0x32, 0xc7, 0xda, 0x47, 0x77, 0x57, 0x68, 0x73, 0x67, - 0x5f, 0x41, 0xb7, 0x4c, 0xed, 0xd0, 0x78, 0x29, 0x7b, 0x33, 0xea, 0xc3, 0xe8, 0xfe, 0x15, 0x16, - 0xb9, 0xe3, 0xdf, 0x41, 0x7f, 0x9e, 0xb1, 0x21, 0x7b, 0xe9, 0xc4, 0x12, 0xfb, 0x1b, 0xbd, 0x77, - 0xa5, 0x8d, 0x09, 0x42, 0x51, 0xa2, 0x4a, 0x20, 0x2c, 0xd4, 0xb3, 0x12, 0x08, 0x8b, 0x75, 0x4d, - 0x82, 0x50, 0xbe, 0xd7, 0x25, 0x10, 0x96, 0x56, 0xa1, 0x12, 0x08, 0xcb, 0x8b, 0x82, 0x7d, 0x0b, - 0x25, 0xb0, 0xb5, 0xfc, 0xb6, 0x21, 0xf3, 0x27, 0xa1, 0x2b, 0xaf, 0xec, 0xe8, 0xc1, 0x77, 0xb0, - 0xd4, 0x0b, 0x4e, 0xea, 0xe2, 0x2f, 0xaa, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x64, - 0x5c, 0xbc, 0xb2, 0x1a, 0x00, 0x00, -} diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto new file mode 100644 index 000000000..04446ad16 --- /dev/null +++ b/weed/pb/messaging.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package messaging_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "MessagingProto"; + +////////////////////////////////////////////////// + +service SeaweedMessaging { + + rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) { + } + + rpc Publish (stream PublishRequest) returns (stream PublishResponse) { + } + + rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) { + } + + rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { + } + + rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) { + } + + rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) { + } + +} + +////////////////////////////////////////////////// + +message SubscriberMessage { + message InitMessage { + string namespace = 1; + string topic = 2; + int32 partition = 3; + 
enum StartPosition { + LATEST = 0; // Start at the newest message + EARLIEST = 1; // Start at the oldest message + TIMESTAMP = 2; // Start after a specified timestamp, exclusive + } + StartPosition startPosition = 4; // Where to begin consuming from + int64 timestampNs = 5; // timestamp in nano seconds + string subscriber_id = 6; // uniquely identify a subscriber to track consumption + } + InitMessage init = 1; + message AckMessage { + int64 message_id = 1; + } + AckMessage ack = 2; + bool is_close = 3; +} + +message Message { + int64 event_time_ns = 1 [jstype = JS_STRING]; + bytes key = 2; // Message key + bytes value = 3; // Message payload + map<string, bytes> headers = 4; // Message headers + bool is_close = 5; +} + +message BrokerMessage { + Message data = 1; +} + +message PublishRequest { + message InitMessage { + string namespace = 1; // only needed on the initial request + string topic = 2; // only needed on the initial request + int32 partition = 3; + } + InitMessage init = 1; + Message data = 2; +} + +message PublishResponse { + message ConfigMessage { + int32 partition_count = 1; + } + ConfigMessage config = 1; + message RedirectMessage { + string new_broker = 1; + } + RedirectMessage redirect = 2; + bool is_closed = 3; +} + +message DeleteTopicRequest { + string namespace = 1; + string topic = 2; +} +message DeleteTopicResponse { +} + +message ConfigureTopicRequest { + string namespace = 1; + string topic = 2; + TopicConfiguration configuration = 3; +} +message ConfigureTopicResponse { +} + +message GetTopicConfigurationRequest { + string namespace = 1; + string topic = 2; +} +message GetTopicConfigurationResponse { + TopicConfiguration configuration = 1; +} + +message FindBrokerRequest { + string namespace = 1; + string topic = 2; + int32 parition = 3; +} + +message FindBrokerResponse { + string broker = 1; +} + +message TopicConfiguration { + int32 partition_count = 1; + string collection = 2; + string replication = 3; + bool is_transient = 4; + enum Partitioning { + NonNullKeyHash = 0; // If not null, hash by key value. If null, round robin + KeyHash = 1; // hash by key value + RoundRobin = 2; // round robin pick one partition + } + Partitioning partitoning = 5; +} diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go new file mode 100644 index 000000000..591406347 --- /dev/null +++ b/weed/pb/messaging_pb/messaging.pb.go @@ -0,0 +1,2053 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 +// source: messaging.proto + +package messaging_pb + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
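
Subscribe is a bidirectional stream whose first SubscriberMessage must carry the InitMessage naming the namespace, topic, partition, and start position. A minimal subscriber sketch; the broker address and topic names are illustrative, and the NewSeaweedMessagingClient constructor and stream types are assumptions based on standard protoc-gen-go output for this service:

    package main

    import (
        "context"
        "io"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    )

    func main() {
        conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure()) // illustrative address
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := messaging_pb.NewSeaweedMessagingClient(conn)
        stream, err := client.Subscribe(context.Background())
        if err != nil {
            log.Fatal(err)
        }

        // The first frame on the stream is the InitMessage.
        if err := stream.Send(&messaging_pb.SubscriberMessage{
            Init: &messaging_pb.SubscriberMessage_InitMessage{
                Namespace:     "chat",
                Topic:         "events",
                Partition:     0,
                StartPosition: messaging_pb.SubscriberMessage_InitMessage_EARLIEST,
                SubscriberId:  "subscriber-1",
            },
        }); err != nil {
            log.Fatal(err)
        }

        // Then the broker streams BrokerMessage frames back (acks omitted here).
        for {
            msg, err := stream.Recv()
            if err == io.EOF {
                return
            }
            if err != nil {
                log.Fatal(err)
            }
            log.Printf("key=%s value=%s", msg.GetData().GetKey(), msg.GetData().GetValue())
        }
    }
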
+const _ = proto.ProtoPackageIsVersion4 + +type SubscriberMessage_InitMessage_StartPosition int32 + +const ( + SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0 // Start at the newest message + SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1 // Start at the oldest message + SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2 // Start after a specified timestamp, exclusive +) + +// Enum value maps for SubscriberMessage_InitMessage_StartPosition. +var ( + SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{ + 0: "LATEST", + 1: "EARLIEST", + 2: "TIMESTAMP", + } + SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{ + "LATEST": 0, + "EARLIEST": 1, + "TIMESTAMP": 2, + } +) + +func (x SubscriberMessage_InitMessage_StartPosition) Enum() *SubscriberMessage_InitMessage_StartPosition { + p := new(SubscriberMessage_InitMessage_StartPosition) + *p = x + return p +} + +func (x SubscriberMessage_InitMessage_StartPosition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubscriberMessage_InitMessage_StartPosition) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[0].Descriptor() +} + +func (SubscriberMessage_InitMessage_StartPosition) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[0] +} + +func (x SubscriberMessage_InitMessage_StartPosition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage_StartPosition.Descriptor instead. +func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0, 0} +} + +type TopicConfiguration_Partitioning int32 + +const ( + TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0 // If not null, hash by key value. If null, round robin + TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1 // hash by key value + TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2 // round robin pick one partition +) + +// Enum value maps for TopicConfiguration_Partitioning. +var ( + TopicConfiguration_Partitioning_name = map[int32]string{ + 0: "NonNullKeyHash", + 1: "KeyHash", + 2: "RoundRobin", + } + TopicConfiguration_Partitioning_value = map[string]int32{ + "NonNullKeyHash": 0, + "KeyHash": 1, + "RoundRobin": 2, + } +) + +func (x TopicConfiguration_Partitioning) Enum() *TopicConfiguration_Partitioning { + p := new(TopicConfiguration_Partitioning) + *p = x + return p +} + +func (x TopicConfiguration_Partitioning) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TopicConfiguration_Partitioning) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[1].Descriptor() +} + +func (TopicConfiguration_Partitioning) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[1] +} + +func (x TopicConfiguration_Partitioning) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TopicConfiguration_Partitioning.Descriptor instead. 
+func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13, 0} +} + +type SubscriberMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3" json:"ack,omitempty"` + IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *SubscriberMessage) Reset() { + *x = SubscriberMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage) ProtoMessage() {} + +func (x *SubscriberMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage { + if x != nil { + return x.Ack + } + return nil +} + +func (x *SubscriberMessage) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs,proto3" json:"event_time_ns,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // Message key + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` // Message payload + Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message headers + IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{1} +} + +func (x *Message) GetEventTimeNs() int64 { + if x != nil { + return x.EventTimeNs + } + return 0 +} + +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Message) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *Message) GetHeaders() map[string][]byte { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Message) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type BrokerMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *Message `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *BrokerMessage) Reset() { + *x = BrokerMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BrokerMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BrokerMessage) ProtoMessage() {} + +func (x *BrokerMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BrokerMessage.ProtoReflect.Descriptor instead. +func (*BrokerMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{2} +} + +func (x *BrokerMessage) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Data *Message `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. 
+func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3} +} + +func (x *PublishRequest) GetInit() *PublishRequest_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *PublishRequest) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect,proto3" json:"redirect,omitempty"` + IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed,proto3" json:"is_closed,omitempty"` +} + +func (x *PublishResponse) Reset() { + *x = PublishResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse) ProtoMessage() {} + +func (x *PublishResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. +func (*PublishResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4} +} + +func (x *PublishResponse) GetConfig() *PublishResponse_ConfigMessage { + if x != nil { + return x.Config + } + return nil +} + +func (x *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage { + if x != nil { + return x.Redirect + } + return nil +} + +func (x *PublishResponse) GetIsClosed() bool { + if x != nil { + return x.IsClosed + } + return false +} + +type DeleteTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *DeleteTopicRequest) Reset() { + *x = DeleteTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicRequest) ProtoMessage() {} + +func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. 
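
PublishResponse pairs a ConfigMessage with an optional RedirectMessage, which suggests the publish handshake: send an InitMessage first, then either read back the partition count or follow new_broker to the broker that owns the partition. A hedged sketch, again assuming the generated client constructor and illustrative addresses:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    )

    func main() {
        conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure()) // illustrative address
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        stream, err := messaging_pb.NewSeaweedMessagingClient(conn).Publish(context.Background())
        if err != nil {
            log.Fatal(err)
        }

        // The first request carries only the InitMessage naming the target partition.
        if err := stream.Send(&messaging_pb.PublishRequest{
            Init: &messaging_pb.PublishRequest_InitMessage{Namespace: "chat", Topic: "events", Partition: 0},
        }); err != nil {
            log.Fatal(err)
        }

        // The broker replies with its partition config, or a redirect to the owner.
        resp, err := stream.Recv()
        if err != nil {
            log.Fatal(err)
        }
        if r := resp.GetRedirect(); r != nil {
            log.Fatalf("partition lives on %s; reconnect there", r.GetNewBroker())
        }
        log.Printf("partition_count=%d", resp.GetConfig().GetPartitionCount())

        // Subsequent requests carry data frames.
        if err := stream.Send(&messaging_pb.PublishRequest{
            Data: &messaging_pb.Message{Key: []byte("k"), Value: []byte("v")},
        }); err != nil {
            log.Fatal(err)
        }
    }
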
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *DeleteTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type DeleteTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteTopicResponse) Reset() { + *x = DeleteTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicResponse) ProtoMessage() {} + +func (x *DeleteTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicResponse.ProtoReflect.Descriptor instead. +func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{6} +} + +type ConfigureTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *ConfigureTopicRequest) Reset() { + *x = ConfigureTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicRequest) ProtoMessage() {} + +func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead. 
+func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{7} +} + +func (x *ConfigureTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ConfigureTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type ConfigureTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ConfigureTopicResponse) Reset() { + *x = ConfigureTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicResponse) ProtoMessage() {} + +func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead. +func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{8} +} + +type GetTopicConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *GetTopicConfigurationRequest) Reset() { + *x = GetTopicConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationRequest) ProtoMessage() {} + +func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead. 
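
ConfigureTopic is a plain unary call: the request bundles the namespace, topic, and a TopicConfiguration. A sketch with illustrative values; the replication string follows SeaweedFS's usual three-digit placement notation:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    )

    func main() {
        conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure()) // illustrative address
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := messaging_pb.NewSeaweedMessagingClient(conn)
        _, err = client.ConfigureTopic(context.Background(), &messaging_pb.ConfigureTopicRequest{
            Namespace: "chat",
            Topic:     "events",
            Configuration: &messaging_pb.TopicConfiguration{
                PartitionCount: 4,
                Collection:     "chat",
                Replication:    "001",
                IsTransient:    false,
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("topic configured")
    }
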
+func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{9} +} + +func (x *GetTopicConfigurationRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GetTopicConfigurationRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type GetTopicConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *GetTopicConfigurationResponse) Reset() { + *x = GetTopicConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationResponse) ProtoMessage() {} + +func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead. +func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{10} +} + +func (x *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type FindBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Parition int32 `protobuf:"varint,3,opt,name=parition,proto3" json:"parition,omitempty"` +} + +func (x *FindBrokerRequest) Reset() { + *x = FindBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerRequest) ProtoMessage() {} + +func (x *FindBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerRequest.ProtoReflect.Descriptor instead. 
+func (*FindBrokerRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{11} +} + +func (x *FindBrokerRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *FindBrokerRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *FindBrokerRequest) GetParition() int32 { + if x != nil { + return x.Parition + } + return 0 +} + +type FindBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"` +} + +func (x *FindBrokerResponse) Reset() { + *x = FindBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerResponse) ProtoMessage() {} + +func (x *FindBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerResponse.ProtoReflect.Descriptor instead. +func (*FindBrokerResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{12} +} + +func (x *FindBrokerResponse) GetBroker() string { + if x != nil { + return x.Broker + } + return "" +} + +type TopicConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient,proto3" json:"is_transient,omitempty"` + Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,proto3,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"` +} + +func (x *TopicConfiguration) Reset() { + *x = TopicConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TopicConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopicConfiguration) ProtoMessage() {} + +func (x *TopicConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TopicConfiguration.ProtoReflect.Descriptor instead. 
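
FindBroker resolves which broker serves a topic partition before the heavier streams are opened; note the partition field really is spelled Parition here, a typo locked into the generated identifier and JSON name by the descriptor. A sketch (addresses illustrative, client constructor assumed as above):

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    )

    func main() {
        conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure()) // illustrative address
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        resp, err := messaging_pb.NewSeaweedMessagingClient(conn).FindBroker(context.Background(),
            &messaging_pb.FindBrokerRequest{Namespace: "chat", Topic: "events", Parition: 0})
        if err != nil {
            log.Fatal(err)
        }

        // Dial the broker that actually owns the partition.
        brokerConn, err := grpc.Dial(resp.GetBroker(), grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer brokerConn.Close()
        log.Println("connected to broker", resp.GetBroker())
    }
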
+func (*TopicConfiguration) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13} +} + +func (x *TopicConfiguration) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +func (x *TopicConfiguration) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *TopicConfiguration) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *TopicConfiguration) GetIsTransient() bool { + if x != nil { + return x.IsTransient + } + return false +} + +func (x *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning { + if x != nil { + return x.Partitoning + } + return TopicConfiguration_NonNullKeyHash +} + +type SubscriberMessage_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` + StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,proto3,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"` // Where to begin consuming from + TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs,proto3" json:"timestampNs,omitempty"` // timestamp in nano seconds + SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId,proto3" json:"subscriber_id,omitempty"` // uniquely identify a subscriber to track consumption +} + +func (x *SubscriberMessage_InitMessage) Reset() { + *x = SubscriberMessage_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_InitMessage) ProtoMessage() {} + +func (x *SubscriberMessage_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage.ProtoReflect.Descriptor instead. 
+func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SubscriberMessage_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition { + if x != nil { + return x.StartPosition + } + return SubscriberMessage_InitMessage_LATEST +} + +func (x *SubscriberMessage_InitMessage) GetTimestampNs() int64 { + if x != nil { + return x.TimestampNs + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetSubscriberId() string { + if x != nil { + return x.SubscriberId + } + return "" +} + +type SubscriberMessage_AckMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` +} + +func (x *SubscriberMessage_AckMessage) Reset() { + *x = SubscriberMessage_AckMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_AckMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_AckMessage) ProtoMessage() {} + +func (x *SubscriberMessage_AckMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_AckMessage.ProtoReflect.Descriptor instead. 
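
After the InitMessage, the same SubscriberMessage envelope carries AckMessage frames keyed by message_id, plus is_close for a graceful shutdown. Two small helpers, assuming the generated SeaweedMessaging_SubscribeClient stream interface that protoc-gen-go produces for this service:

    package example

    import "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"

    // ackMessage reports consumption of one delivered message back to the broker,
    // echoing the message_id it arrived with.
    func ackMessage(stream messaging_pb.SeaweedMessaging_SubscribeClient, messageID int64) error {
        return stream.Send(&messaging_pb.SubscriberMessage{
            Ack: &messaging_pb.SubscriberMessage_AckMessage{MessageId: messageID},
        })
    }

    // closeSubscription tells the broker this subscriber is going away.
    func closeSubscription(stream messaging_pb.SeaweedMessaging_SubscribeClient) error {
        return stream.Send(&messaging_pb.SubscriberMessage{IsClose: true})
    }
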
+func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *SubscriberMessage_AckMessage) GetMessageId() int64 { + if x != nil { + return x.MessageId + } + return 0 +} + +type PublishRequest_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // only needed on the initial request + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // only needed on the initial request + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` +} + +func (x *PublishRequest_InitMessage) Reset() { + *x = PublishRequest_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest_InitMessage) ProtoMessage() {} + +func (x *PublishRequest_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest_InitMessage.ProtoReflect.Descriptor instead. +func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *PublishRequest_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *PublishRequest_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +type PublishResponse_ConfigMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` +} + +func (x *PublishResponse_ConfigMessage) Reset() { + *x = PublishResponse_ConfigMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_ConfigMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_ConfigMessage) ProtoMessage() {} + +func (x *PublishResponse_ConfigMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_ConfigMessage.ProtoReflect.Descriptor instead. 
+func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) {
+	return file_messaging_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PublishResponse_ConfigMessage) GetPartitionCount() int32 {
+	if x != nil {
+		return x.PartitionCount
+	}
+	return 0
+}
+
+type PublishResponse_RedirectMessage struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker,proto3" json:"new_broker,omitempty"`
+}
+
+func (x *PublishResponse_RedirectMessage) Reset() {
+	*x = PublishResponse_RedirectMessage{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_messaging_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PublishResponse_RedirectMessage) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublishResponse_RedirectMessage) ProtoMessage() {}
+
+func (x *PublishResponse_RedirectMessage) ProtoReflect() protoreflect.Message {
+	mi := &file_messaging_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublishResponse_RedirectMessage.ProtoReflect.Descriptor instead.
+func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) {
+	return file_messaging_proto_rawDescGZIP(), []int{4, 1}
+}
+
+func (x *PublishResponse_RedirectMessage) GetNewBroker() string {
+	if x != nil {
+		return x.NewBroker
+	}
+	return ""
+}
+
+var File_messaging_proto protoreflect.FileDescriptor
+
+var file_messaging_proto_rawDesc = []byte{
+	// (raw FileDescriptorProto bytes for messaging.proto elided; the listing is truncated at this point in the source)
+}
0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x46, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x18, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x67, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x11, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x46, 0x69, + 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x27, 0x0a, 0x0f, 0x70, 
0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, + 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x3f, + 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, + 0x0a, 0x0e, 0x4e, 0x6f, 0x6e, 0x4e, 0x75, 0x6c, 0x6c, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x10, 0x02, 0x32, + 0xad, 0x04, 0x0a, 0x10, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x12, 0x1c, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, + 0x01, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 
0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a, + 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, + 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x57, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_messaging_proto_rawDescOnce sync.Once + file_messaging_proto_rawDescData = file_messaging_proto_rawDesc +) + +func file_messaging_proto_rawDescGZIP() []byte { + file_messaging_proto_rawDescOnce.Do(func() { + file_messaging_proto_rawDescData = protoimpl.X.CompressGZIP(file_messaging_proto_rawDescData) + }) + return file_messaging_proto_rawDescData +} + +var file_messaging_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_messaging_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_messaging_proto_goTypes = []interface{}{ + (SubscriberMessage_InitMessage_StartPosition)(0), // 0: messaging_pb.SubscriberMessage.InitMessage.StartPosition + (TopicConfiguration_Partitioning)(0), // 1: messaging_pb.TopicConfiguration.Partitioning + (*SubscriberMessage)(nil), // 2: messaging_pb.SubscriberMessage + (*Message)(nil), // 3: messaging_pb.Message + (*BrokerMessage)(nil), // 4: messaging_pb.BrokerMessage + (*PublishRequest)(nil), // 5: messaging_pb.PublishRequest + (*PublishResponse)(nil), // 6: messaging_pb.PublishResponse + (*DeleteTopicRequest)(nil), // 7: messaging_pb.DeleteTopicRequest + (*DeleteTopicResponse)(nil), // 8: messaging_pb.DeleteTopicResponse + (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest + (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse 
+ (*GetTopicConfigurationRequest)(nil), // 11: messaging_pb.GetTopicConfigurationRequest + (*GetTopicConfigurationResponse)(nil), // 12: messaging_pb.GetTopicConfigurationResponse + (*FindBrokerRequest)(nil), // 13: messaging_pb.FindBrokerRequest + (*FindBrokerResponse)(nil), // 14: messaging_pb.FindBrokerResponse + (*TopicConfiguration)(nil), // 15: messaging_pb.TopicConfiguration + (*SubscriberMessage_InitMessage)(nil), // 16: messaging_pb.SubscriberMessage.InitMessage + (*SubscriberMessage_AckMessage)(nil), // 17: messaging_pb.SubscriberMessage.AckMessage + nil, // 18: messaging_pb.Message.HeadersEntry + (*PublishRequest_InitMessage)(nil), // 19: messaging_pb.PublishRequest.InitMessage + (*PublishResponse_ConfigMessage)(nil), // 20: messaging_pb.PublishResponse.ConfigMessage + (*PublishResponse_RedirectMessage)(nil), // 21: messaging_pb.PublishResponse.RedirectMessage +} +var file_messaging_proto_depIdxs = []int32{ + 16, // 0: messaging_pb.SubscriberMessage.init:type_name -> messaging_pb.SubscriberMessage.InitMessage + 17, // 1: messaging_pb.SubscriberMessage.ack:type_name -> messaging_pb.SubscriberMessage.AckMessage + 18, // 2: messaging_pb.Message.headers:type_name -> messaging_pb.Message.HeadersEntry + 3, // 3: messaging_pb.BrokerMessage.data:type_name -> messaging_pb.Message + 19, // 4: messaging_pb.PublishRequest.init:type_name -> messaging_pb.PublishRequest.InitMessage + 3, // 5: messaging_pb.PublishRequest.data:type_name -> messaging_pb.Message + 20, // 6: messaging_pb.PublishResponse.config:type_name -> messaging_pb.PublishResponse.ConfigMessage + 21, // 7: messaging_pb.PublishResponse.redirect:type_name -> messaging_pb.PublishResponse.RedirectMessage + 15, // 8: messaging_pb.ConfigureTopicRequest.configuration:type_name -> messaging_pb.TopicConfiguration + 15, // 9: messaging_pb.GetTopicConfigurationResponse.configuration:type_name -> messaging_pb.TopicConfiguration + 1, // 10: messaging_pb.TopicConfiguration.partitoning:type_name -> messaging_pb.TopicConfiguration.Partitioning + 0, // 11: messaging_pb.SubscriberMessage.InitMessage.startPosition:type_name -> messaging_pb.SubscriberMessage.InitMessage.StartPosition + 2, // 12: messaging_pb.SeaweedMessaging.Subscribe:input_type -> messaging_pb.SubscriberMessage + 5, // 13: messaging_pb.SeaweedMessaging.Publish:input_type -> messaging_pb.PublishRequest + 7, // 14: messaging_pb.SeaweedMessaging.DeleteTopic:input_type -> messaging_pb.DeleteTopicRequest + 9, // 15: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest + 11, // 16: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest + 13, // 17: messaging_pb.SeaweedMessaging.FindBroker:input_type -> messaging_pb.FindBrokerRequest + 4, // 18: messaging_pb.SeaweedMessaging.Subscribe:output_type -> messaging_pb.BrokerMessage + 6, // 19: messaging_pb.SeaweedMessaging.Publish:output_type -> messaging_pb.PublishResponse + 8, // 20: messaging_pb.SeaweedMessaging.DeleteTopic:output_type -> messaging_pb.DeleteTopicResponse + 10, // 21: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse + 12, // 22: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse + 14, // 23: messaging_pb.SeaweedMessaging.FindBroker:output_type -> messaging_pb.FindBrokerResponse + 18, // [18:24] is the sub-list for method output_type + 12, // [12:18] is the sub-list for method input_type + 12, // [12:12] is the sub-list for 
extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_messaging_proto_init() } +func file_messaging_proto_init() { + if File_messaging_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_messaging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BrokerMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerResponse); i { + case 0: + return &v.state 
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopicConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_AckMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_ConfigMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_RedirectMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_messaging_proto_rawDesc, + NumEnums: 2, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_messaging_proto_goTypes, + DependencyIndexes: file_messaging_proto_depIdxs, + EnumInfos: file_messaging_proto_enumTypes, + MessageInfos: file_messaging_proto_msgTypes, + }.Build() + File_messaging_proto = out.File + file_messaging_proto_rawDesc = nil + file_messaging_proto_goTypes = nil + file_messaging_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SeaweedMessagingClient is the client API for SeaweedMessaging service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
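//
// A minimal usage sketch (illustrative only; the broker address, namespace,
// and topic below are hypothetical placeholders):
//
//	conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//	client := NewSeaweedMessagingClient(conn)
//	resp, err := client.GetTopicConfiguration(context.Background(),
//		&GetTopicConfigurationRequest{Namespace: "default", Topic: "events"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(resp.GetConfiguration())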
+type SeaweedMessagingClient interface { + Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) + Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) + ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) + GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) + FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) +} + +type seaweedMessagingClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient { + return &seaweedMessagingClient{cc} +} + +func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingSubscribeClient{stream} + return x, nil +} + +type SeaweedMessaging_SubscribeClient interface { + Send(*SubscriberMessage) error + Recv() (*BrokerMessage, error) + grpc.ClientStream +} + +type seaweedMessagingSubscribeClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) { + m := new(BrokerMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingPublishClient{stream} + return x, nil +} + +type SeaweedMessaging_PublishClient interface { + Send(*PublishRequest) error + Recv() (*PublishResponse, error) + grpc.ClientStream +} + +type seaweedMessagingPublishClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) { + m := new(PublishResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { + out := new(DeleteTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { + out := new(ConfigureTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) { + out := new(GetTopicConfigurationResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) { + out := new(FindBrokerResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SeaweedMessagingServer is the server API for SeaweedMessaging service. +type SeaweedMessagingServer interface { + Subscribe(SeaweedMessaging_SubscribeServer) error + Publish(SeaweedMessaging_PublishServer) error + DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) + ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) + GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) + FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) +} + +// UnimplementedSeaweedMessagingServer can be embedded to have forward compatible implementations. +type UnimplementedSeaweedMessagingServer struct { +} + +func (*UnimplementedSeaweedMessagingServer) Subscribe(SeaweedMessaging_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (*UnimplementedSeaweedMessagingServer) Publish(SeaweedMessaging_PublishServer) error { + return status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (*UnimplementedSeaweedMessagingServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented") +} +func (*UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented") +} +func (*UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented") +} +func (*UnimplementedSeaweedMessagingServer) FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FindBroker not implemented") +} + +func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) { + s.RegisterService(&_SeaweedMessaging_serviceDesc, srv) +} + +func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream}) +} + +type SeaweedMessaging_SubscribeServer interface { + Send(*BrokerMessage) error + Recv() (*SubscriberMessage, error) + grpc.ServerStream +} + +type seaweedMessagingSubscribeServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) { + m := 
new(SubscriberMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream}) +} + +type SeaweedMessaging_PublishServer interface { + Send(*PublishResponse) error + Recv() (*PublishRequest, error) + grpc.ServerStream +} + +type seaweedMessagingPublishServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) { + m := new(PublishRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FindBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).FindBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest)) 
+ } + return interceptor(ctx, in, info, handler) +} + +var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{ + ServiceName: "messaging_pb.SeaweedMessaging", + HandlerType: (*SeaweedMessagingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteTopic", + Handler: _SeaweedMessaging_DeleteTopic_Handler, + }, + { + MethodName: "ConfigureTopic", + Handler: _SeaweedMessaging_ConfigureTopic_Handler, + }, + { + MethodName: "GetTopicConfiguration", + Handler: _SeaweedMessaging_GetTopicConfiguration_Handler, + }, + { + MethodName: "FindBroker", + Handler: _SeaweedMessaging_FindBroker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _SeaweedMessaging_Subscribe_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Publish", + Handler: _SeaweedMessaging_Publish_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "messaging.proto", +} diff --git a/weed/pb/shared_values.go b/weed/pb/shared_values.go new file mode 100644 index 000000000..1af19e51a --- /dev/null +++ b/weed/pb/shared_values.go @@ -0,0 +1,5 @@ +package pb + +const ( + AdminShellClient = "adminShell" +) diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go index b2edf9c5e..cae9e018f 100644 --- a/weed/pb/volume_info.go +++ b/weed/pb/volume_info.go @@ -15,39 +15,49 @@ import ( ) // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil -func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool) { +func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeInfo, hasRemoteFile bool, hasVolumeInfoFile bool, err error) { - volumeInfo := &volume_server_pb.VolumeInfo{} + volumeInfo = &volume_server_pb.VolumeInfo{} glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { if !exists { - return volumeInfo, false + return } + hasVolumeInfoFile = true if !canRead { glog.Warningf("can not read %s", fileName) + err = fmt.Errorf("can not read %s", fileName) + return } - return volumeInfo, false + return } + hasVolumeInfoFile = true + glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) tierData, readErr := ioutil.ReadFile(fileName) if readErr != nil { glog.Warningf("fail to read %s : %v", fileName, readErr) - return volumeInfo, false + err = fmt.Errorf("fail to read %s : %v", fileName, readErr) + return + } glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) - if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { + if err = jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { glog.Warningf("unmarshal error: %v", err) - return volumeInfo, false + err = fmt.Errorf("unmarshal error: %v", err) + return } if len(volumeInfo.GetFiles()) == 0 { - return volumeInfo, false + return } - return volumeInfo, true + hasRemoteFile = true + + return } func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error { diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 9cf7272ef..f9836c402 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package volume_server_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"; ////////////////////////////////////////////////// @@ -8,6 +9,7 @@ service VolumeServer { //Experts only: takes multiple fid parameters. 
This function does not propagate deletes to replicas. rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) { } + rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) { } rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) { @@ -35,6 +37,12 @@ service VolumeServer { } rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { } + rpc VolumeMarkWritable (VolumeMarkWritableRequest) returns (VolumeMarkWritableResponse) { + } + rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) { + } + rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) { + } // copy the .idx .dat files, and mount this volume rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) { @@ -44,6 +52,11 @@ service VolumeServer { rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { } + rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) { + } + rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) { + } + rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) { } rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) { @@ -75,16 +88,24 @@ service VolumeServer { rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) { } - // query + rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) { + } + rpc VolumeServerLeave (VolumeServerLeaveRequest) returns (VolumeServerLeaveResponse) { + } + + // <experimental> query rpc Query (QueryRequest) returns (stream QueriedStripe) { } + rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) { + } } ////////////////////////////////////////////////// message BatchDeleteRequest { repeated string file_ids = 1; + bool skip_cookie_check = 2; } message BatchDeleteResponse { @@ -119,6 +140,7 @@ message VacuumVolumeCommitRequest { uint32 volume_id = 1; } message VacuumVolumeCommitResponse { + bool is_read_only = 1; } message VacuumVolumeCleanupRequest { @@ -140,6 +162,7 @@ message AllocateVolumeRequest { string replication = 4; string ttl = 5; uint32 memory_map_max_size_mb = 6; + string disk_type = 7; } message AllocateVolumeResponse { } @@ -189,12 +212,34 @@ message VolumeMarkReadonlyRequest { message VolumeMarkReadonlyResponse { } +message VolumeMarkWritableRequest { + uint32 volume_id = 1; +} +message VolumeMarkWritableResponse { +} + +message VolumeConfigureRequest { + uint32 volume_id = 1; + string replication = 2; +} +message VolumeConfigureResponse { + string error = 1; +} + +message VolumeStatusRequest { + uint32 volume_id = 1; +} +message VolumeStatusResponse { + bool is_read_only = 1; +} + message VolumeCopyRequest { uint32 volume_id = 1; string collection = 2; string replication = 3; string ttl = 4; string source_data_node = 5; + string disk_type = 6; } message VolumeCopyResponse { uint64 last_append_at_ns = 1; @@ -213,6 +258,25 @@ message CopyFileResponse { bytes file_content = 1; } +message ReadNeedleBlobRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; + int64 offset = 3; // actual offset + int32 size = 4; +} +message ReadNeedleBlobResponse { + bytes needle_blob = 1; +} + +message WriteNeedleBlobRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; + int32 size = 3; + bytes needle_blob = 4; +} +message WriteNeedleBlobResponse { +} + message 
VolumeTailSenderRequest { uint32 volume_id = 1; uint64 since_ns = 2; @@ -323,6 +387,7 @@ message ReadVolumeFileStatusResponse { uint64 file_count = 6; uint32 compaction_revision = 7; string collection = 8; + string disk_type = 9; } message DiskStatus { @@ -330,6 +395,9 @@ message DiskStatus { uint64 all = 2; uint64 used = 3; uint64 free = 4; + float percent_free = 5; + float percent_used = 6; + string disk_type = 7; } message MemStatus { @@ -355,6 +423,7 @@ message RemoteFile { message VolumeInfo { repeated RemoteFile files = 1; uint32 version = 2; + string replication = 3; } message VolumeTierMoveDatToRemoteRequest { @@ -378,6 +447,19 @@ message VolumeTierMoveDatFromRemoteResponse { float processedPercentage = 2; } +message VolumeServerStatusRequest { + +} +message VolumeServerStatusResponse { + repeated DiskStatus disk_statuses = 1; + MemStatus memory_status = 2; +} + +message VolumeServerLeaveRequest { +} +message VolumeServerLeaveResponse { +} + // select on volume servers message QueryRequest { repeated string selections = 1; @@ -435,3 +517,16 @@ message QueryRequest { message QueriedStripe { bytes records = 1; } + +message VolumeNeedleStatusRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; +} +message VolumeNeedleStatusResponse { + uint64 needle_id = 1; + uint32 cookie = 2; + uint32 size = 3; + uint64 last_modified = 4; + uint32 crc = 5; + string ttl = 6; +} diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 1c2e10d8e..c642142ba 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,2114 +1,7262 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 // source: volume_server.proto -// DO NOT EDIT! - -/* -Package volume_server_pb is a generated protocol buffer package. 
- -It is generated from these files: - volume_server.proto - -It has these top-level messages: - BatchDeleteRequest - BatchDeleteResponse - DeleteResult - Empty - VacuumVolumeCheckRequest - VacuumVolumeCheckResponse - VacuumVolumeCompactRequest - VacuumVolumeCompactResponse - VacuumVolumeCommitRequest - VacuumVolumeCommitResponse - VacuumVolumeCleanupRequest - VacuumVolumeCleanupResponse - DeleteCollectionRequest - DeleteCollectionResponse - AllocateVolumeRequest - AllocateVolumeResponse - VolumeSyncStatusRequest - VolumeSyncStatusResponse - VolumeIncrementalCopyRequest - VolumeIncrementalCopyResponse - VolumeMountRequest - VolumeMountResponse - VolumeUnmountRequest - VolumeUnmountResponse - VolumeDeleteRequest - VolumeDeleteResponse - VolumeMarkReadonlyRequest - VolumeMarkReadonlyResponse - VolumeCopyRequest - VolumeCopyResponse - CopyFileRequest - CopyFileResponse - VolumeTailSenderRequest - VolumeTailSenderResponse - VolumeTailReceiverRequest - VolumeTailReceiverResponse - VolumeEcShardsGenerateRequest - VolumeEcShardsGenerateResponse - VolumeEcShardsRebuildRequest - VolumeEcShardsRebuildResponse - VolumeEcShardsCopyRequest - VolumeEcShardsCopyResponse - VolumeEcShardsDeleteRequest - VolumeEcShardsDeleteResponse - VolumeEcShardsMountRequest - VolumeEcShardsMountResponse - VolumeEcShardsUnmountRequest - VolumeEcShardsUnmountResponse - VolumeEcShardReadRequest - VolumeEcShardReadResponse - VolumeEcBlobDeleteRequest - VolumeEcBlobDeleteResponse - VolumeEcShardsToVolumeRequest - VolumeEcShardsToVolumeResponse - ReadVolumeFileStatusRequest - ReadVolumeFileStatusResponse - DiskStatus - MemStatus - RemoteFile - VolumeInfo - VolumeTierMoveDatToRemoteRequest - VolumeTierMoveDatToRemoteResponse - VolumeTierMoveDatFromRemoteRequest - VolumeTierMoveDatFromRemoteResponse - QueryRequest - QueriedStripe -*/ -package volume_server_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package volume_server_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type BatchDeleteRequest struct { - FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` + SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"` +} + +func (x *BatchDeleteRequest) Reset() { + *x = BatchDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BatchDeleteRequest) Reset() { *m = BatchDeleteRequest{} } -func (m *BatchDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BatchDeleteRequest) ProtoMessage() {} -func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*BatchDeleteRequest) ProtoMessage() {} -func (m *BatchDeleteRequest) GetFileIds() []string { - if m != nil { - return m.FileIds +func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead. +func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{0} +} + +func (x *BatchDeleteRequest) GetFileIds() []string { + if x != nil { + return x.FileIds } return nil } +func (x *BatchDeleteRequest) GetSkipCookieCheck() bool { + if x != nil { + return x.SkipCookieCheck + } + return false +} + type BatchDeleteResponse struct { - Results []*DeleteResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchDeleteResponse) Reset() { + *x = BatchDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BatchDeleteResponse) Reset() { *m = BatchDeleteResponse{} } -func (m *BatchDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BatchDeleteResponse) ProtoMessage() {} -func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*BatchDeleteResponse) ProtoMessage() {} -func (m *BatchDeleteResponse) GetResults() []*DeleteResult { - if m != nil { - return m.Results +func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead. 
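// A minimal usage sketch for the new skip_cookie_check field (illustrative
// only; the file ID below is a hypothetical placeholder, and the assumption
// is that the flag bypasses per-needle cookie validation on delete):
//
//	req := &BatchDeleteRequest{
//		FileIds:         []string{"3,01637037d6"},
//		SkipCookieCheck: true,
//	}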
+func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{1} +} + +func (x *BatchDeleteResponse) GetResults() []*DeleteResult { + if x != nil { + return x.Results } return nil } type DeleteResult struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - Version uint32 `protobuf:"varint,5,opt,name=version" json:"version,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *DeleteResult) Reset() { + *x = DeleteResult{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResult) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteResult) Reset() { *m = DeleteResult{} } -func (m *DeleteResult) String() string { return proto.CompactTextString(m) } -func (*DeleteResult) ProtoMessage() {} -func (*DeleteResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*DeleteResult) ProtoMessage() {} -func (m *DeleteResult) GetFileId() string { - if m != nil { - return m.FileId +func (x *DeleteResult) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead. 
+func (*DeleteResult) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{2} +} + +func (x *DeleteResult) GetFileId() string { + if x != nil { + return x.FileId } return "" } -func (m *DeleteResult) GetStatus() int32 { - if m != nil { - return m.Status +func (x *DeleteResult) GetStatus() int32 { + if x != nil { + return x.Status } return 0 } -func (m *DeleteResult) GetError() string { - if m != nil { - return m.Error +func (x *DeleteResult) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *DeleteResult) GetSize() uint32 { - if m != nil { - return m.Size +func (x *DeleteResult) GetSize() uint32 { + if x != nil { + return x.Size } return 0 } -func (m *DeleteResult) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *DeleteResult) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{3} +} type VacuumVolumeCheckRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VacuumVolumeCheckRequest) Reset() { + *x = VacuumVolumeCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VacuumVolumeCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } -func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCheckRequest) ProtoMessage() {} -func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*VacuumVolumeCheckRequest) ProtoMessage() {} -func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{4} +} + +func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } type VacuumVolumeCheckResponse struct { - GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio" json:"garbage_ratio,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` } -func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} } -func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCheckResponse) ProtoMessage() {} -func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *VacuumVolumeCheckResponse) Reset() { + *x = VacuumVolumeCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { - if m != nil { - return m.GarbageRatio +func (x *VacuumVolumeCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VacuumVolumeCheckResponse) ProtoMessage() {} + +func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor 
+func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 {
+	if x != nil {
+		return x.GarbageRatio
 	}
 	return 0
 }
 
 type VacuumVolumeCompactRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-	Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"`
 }
 
-func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} }
-func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCompactRequest) ProtoMessage() {}
-func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (x *VacuumVolumeCompactRequest) Reset() {
+	*x = VacuumVolumeCompactRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
 
-func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VacuumVolumeCompactRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCompactRequest) ProtoMessage() {}
+
+func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
-func (m *VacuumVolumeCompactRequest) GetPreallocate() int64 {
-	if m != nil {
-		return m.Preallocate
+func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 {
+	if x != nil {
+		return x.Preallocate
 	}
 	return 0
 }
 
 type VacuumVolumeCompactResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 }
 
-func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} }
-func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCompactResponse) ProtoMessage() {}
-func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (x *VacuumVolumeCompactResponse) Reset() {
+	*x = VacuumVolumeCompactResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VacuumVolumeCompactResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCompactResponse) ProtoMessage() {}
+
+func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{7}
+}
 
 type VacuumVolumeCommitRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VacuumVolumeCommitRequest) Reset() {
+	*x = VacuumVolumeCommitRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VacuumVolumeCommitRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} }
-func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCommitRequest) ProtoMessage() {}
-func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*VacuumVolumeCommitRequest) ProtoMessage() {}
 
-func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VacuumVolumeCommitResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"`
 }
 
-func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} }
-func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCommitResponse) ProtoMessage() {}
-func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (x *VacuumVolumeCommitResponse) Reset() {
+	*x = VacuumVolumeCommitResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VacuumVolumeCommitResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCommitResponse) ProtoMessage() {}
+
+func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool {
+	if x != nil {
+		return x.IsReadOnly
+	}
+	return false
+}
 
 type VacuumVolumeCleanupRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VacuumVolumeCleanupRequest) Reset() {
+	*x = VacuumVolumeCleanupRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VacuumVolumeCleanupRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
+
+func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} }
-func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
-func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+// Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{10}
+}
 
-func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VacuumVolumeCleanupResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *VacuumVolumeCleanupResponse) Reset() {
+	*x = VacuumVolumeCleanupResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VacuumVolumeCleanupResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
+
+func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} }
-func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) }
-func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
-func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+// Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead.
+func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{11}
+}
 
 type DeleteCollectionRequest struct {
-	Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"`
+}
+
+func (x *DeleteCollectionRequest) Reset() {
+	*x = DeleteCollectionRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
-func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (x *DeleteCollectionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
 
-func (m *DeleteCollectionRequest) GetCollection() string {
-	if m != nil {
-		return m.Collection
+func (*DeleteCollectionRequest) ProtoMessage() {}
+
+func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *DeleteCollectionRequest) GetCollection() string {
+	if x != nil {
+		return x.Collection
 	}
 	return ""
 }
 
 type DeleteCollectionResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *DeleteCollectionResponse) Reset() {
+	*x = DeleteCollectionResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DeleteCollectionResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
-func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*DeleteCollectionResponse) ProtoMessage() {}
+
+func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead.
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{13}
+}
 
 type AllocateVolumeRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-	Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
-	Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"`
-	Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"`
-	Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"`
-	MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+	Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"`
+	Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"`
+	Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
+	MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"`
+	DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *AllocateVolumeRequest) Reset() {
+	*x = AllocateVolumeRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AllocateVolumeRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AllocateVolumeRequest) ProtoMessage() {}
+
+func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} }
-func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateVolumeRequest) ProtoMessage() {}
-func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+// Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead.
+func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{14}
+}
 
-func (m *AllocateVolumeRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *AllocateVolumeRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
-func (m *AllocateVolumeRequest) GetCollection() string {
-	if m != nil {
-		return m.Collection
+func (x *AllocateVolumeRequest) GetCollection() string {
+	if x != nil {
+		return x.Collection
 	}
 	return ""
 }
 
-func (m *AllocateVolumeRequest) GetPreallocate() int64 {
-	if m != nil {
-		return m.Preallocate
+func (x *AllocateVolumeRequest) GetPreallocate() int64 {
+	if x != nil {
+		return x.Preallocate
 	}
 	return 0
 }
 
-func (m *AllocateVolumeRequest) GetReplication() string {
-	if m != nil {
-		return m.Replication
+func (x *AllocateVolumeRequest) GetReplication() string {
+	if x != nil {
+		return x.Replication
 	}
 	return ""
 }
 
-func (m *AllocateVolumeRequest) GetTtl() string {
-	if m != nil {
-		return m.Ttl
+func (x *AllocateVolumeRequest) GetTtl() string {
+	if x != nil {
+		return x.Ttl
 	}
 	return ""
 }
 
-func (m *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 {
-	if m != nil {
-		return m.MemoryMapMaxSizeMb
+func (x *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 {
+	if x != nil {
+		return x.MemoryMapMaxSizeMb
 	}
 	return 0
 }
 
+func (x *AllocateVolumeRequest) GetDiskType() string {
+	if x != nil {
+		return x.DiskType
+	}
+	return ""
+}
+
 type AllocateVolumeResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *AllocateVolumeResponse) Reset() {
+	*x = AllocateVolumeResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AllocateVolumeResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AllocateVolumeResponse) ProtoMessage() {}
+
+func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} }
-func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateVolumeResponse) ProtoMessage() {}
-func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+// Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead.
+func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{15}
+}
 
 type VolumeSyncStatusRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeSyncStatusRequest) Reset() {
+	*x = VolumeSyncStatusRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeSyncStatusRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeSyncStatusRequest) ProtoMessage() {}
+
+func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[16]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} }
-func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncStatusRequest) ProtoMessage() {}
-func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+// Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{16}
+}
 
-func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VolumeSyncStatusResponse struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-	Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
-	Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"`
-	Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"`
-	TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset" json:"tail_offset,omitempty"`
-	CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"`
-	IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+	Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"`
+	Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"`
+	TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"`
+	CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"`
+}
+
+func (x *VolumeSyncStatusResponse) Reset() {
+	*x = VolumeSyncStatusResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[17]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeSyncStatusResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeSyncStatusResponse) ProtoMessage() {}
+
+func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[17]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} }
-func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeSyncStatusResponse) ProtoMessage() {}
-func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+// Deprecated: Use VolumeSyncStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{17}
+}
 
-func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
-func (m *VolumeSyncStatusResponse) GetCollection() string {
-	if m != nil {
-		return m.Collection
+func (x *VolumeSyncStatusResponse) GetCollection() string {
+	if x != nil {
+		return x.Collection
 	}
 	return ""
 }
 
-func (m *VolumeSyncStatusResponse) GetReplication() string {
-	if m != nil {
-		return m.Replication
+func (x *VolumeSyncStatusResponse) GetReplication() string {
+	if x != nil {
+		return x.Replication
 	}
 	return ""
 }
 
-func (m *VolumeSyncStatusResponse) GetTtl() string {
-	if m != nil {
-		return m.Ttl
+func (x *VolumeSyncStatusResponse) GetTtl() string {
+	if x != nil {
+		return x.Ttl
 	}
 	return ""
 }
 
-func (m *VolumeSyncStatusResponse) GetTailOffset() uint64 {
-	if m != nil {
-		return m.TailOffset
+func (x *VolumeSyncStatusResponse) GetTailOffset() uint64 {
+	if x != nil {
+		return x.TailOffset
 	}
 	return 0
 }
 
-func (m *VolumeSyncStatusResponse) GetCompactRevision() uint32 {
-	if m != nil {
-		return m.CompactRevision
+func (x *VolumeSyncStatusResponse) GetCompactRevision() uint32 {
+	if x != nil {
+		return x.CompactRevision
 	}
 	return 0
 }
 
-func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 {
-	if m != nil {
-		return m.IdxFileSize
+func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 {
+	if x != nil {
+		return x.IdxFileSize
 	}
 	return 0
 }
 
 type VolumeIncrementalCopyRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-	SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"`
 }
 
-func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} }
-func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeIncrementalCopyRequest) ProtoMessage() {}
-func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (x *VolumeIncrementalCopyRequest) Reset() {
+	*x = VolumeIncrementalCopyRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[18]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeIncrementalCopyRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
 
-func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (*VolumeIncrementalCopyRequest) ProtoMessage() {}
+
+func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[18]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
-func (m *VolumeIncrementalCopyRequest) GetSinceNs() uint64 {
-	if m != nil {
-		return m.SinceNs
+func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 {
+	if x != nil {
+		return x.SinceNs
 	}
 	return 0
 }
 
 type VolumeIncrementalCopyResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"`
 }
 
-func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} }
-func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeIncrementalCopyResponse) ProtoMessage() {}
-func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (x *VolumeIncrementalCopyResponse) Reset() {
+	*x = VolumeIncrementalCopyResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeIncrementalCopyResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeIncrementalCopyResponse) ProtoMessage() {}
+
+func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{19}
+}
 
-func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte {
-	if m != nil {
-		return m.FileContent
+func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte {
+	if x != nil {
+		return x.FileContent
 	}
 	return nil
 }
 
 type VolumeMountRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
 }
 
-func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} }
-func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeMountRequest) ProtoMessage() {}
-func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (x *VolumeMountRequest) Reset() {
+	*x = VolumeMountRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeMountRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
 
-func (m *VolumeMountRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (*VolumeMountRequest) ProtoMessage() {}
+
+func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMountRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *VolumeMountRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VolumeMountResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 }
 
-func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} }
-func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeMountResponse) ProtoMessage() {}
-func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (x *VolumeMountResponse) Reset() {
+	*x = VolumeMountResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeMountResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMountResponse) ProtoMessage() {}
+
+func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[21]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMountResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{21}
+}
 
 type VolumeUnmountRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeUnmountRequest) Reset() {
+	*x = VolumeUnmountRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} }
-func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeUnmountRequest) ProtoMessage() {}
-func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (x *VolumeUnmountRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
 
-func (m *VolumeUnmountRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (*VolumeUnmountRequest) ProtoMessage() {}
+
+func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[22]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead.
+func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *VolumeUnmountRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VolumeUnmountResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeUnmountResponse) Reset() {
+	*x = VolumeUnmountResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[23]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeUnmountResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeUnmountResponse) ProtoMessage() {}
+
+func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[23]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} }
-func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeUnmountResponse) ProtoMessage() {}
-func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+// Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead.
+func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{23}
+}
 
 type VolumeDeleteRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
 }
 
-func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} }
-func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeDeleteRequest) ProtoMessage() {}
-func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (x *VolumeDeleteRequest) Reset() {
+	*x = VolumeDeleteRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[24]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
 
-func (m *VolumeDeleteRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VolumeDeleteRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeDeleteRequest) ProtoMessage() {}
+
+func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[24]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead.
+func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *VolumeDeleteRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VolumeDeleteResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeDeleteResponse) Reset() {
+	*x = VolumeDeleteResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[25]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} }
-func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeDeleteResponse) ProtoMessage() {}
-func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (x *VolumeDeleteResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeDeleteResponse) ProtoMessage() {}
+
+func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[25]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead.
+func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{25}
+}
 
 type VolumeMarkReadonlyRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMarkReadonlyRequest) Reset() {
+	*x = VolumeMarkReadonlyRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[26]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeMarkReadonlyRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkReadonlyRequest) ProtoMessage() {}
+
+func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[26]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} }
-func (m *VolumeMarkReadonlyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeMarkReadonlyRequest) ProtoMessage() {}
-func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+// Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{26}
+}
 
-func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
 type VolumeMarkReadonlyResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMarkReadonlyResponse) Reset() {
+	*x = VolumeMarkReadonlyResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[27]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeMarkReadonlyResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkReadonlyResponse) ProtoMessage() {}
+
+func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[27]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{27}
+}
+
+type VolumeMarkWritableRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeMarkWritableRequest) Reset() {
+	*x = VolumeMarkWritableRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[28]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeMarkWritableRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkWritableRequest) ProtoMessage() {}
+
+func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[28]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeMarkWritableRequest.ProtoReflect.Descriptor instead.
+func (*VolumeMarkWritableRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *VolumeMarkWritableRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
+	}
+	return 0
+}
+
+type VolumeMarkWritableResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *VolumeMarkWritableResponse) Reset() {
+	*x = VolumeMarkWritableResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[29]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeMarkWritableResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeMarkWritableResponse) ProtoMessage() {}
+
+func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[29]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
 }
 
-func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} }
-func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeMarkReadonlyResponse) ProtoMessage() {}
-func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+// Deprecated: Use VolumeMarkWritableResponse.ProtoReflect.Descriptor instead.
+func (*VolumeMarkWritableResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{29}
+}
+
+type VolumeConfigureRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
+}
+
+func (x *VolumeConfigureRequest) Reset() {
+	*x = VolumeConfigureRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[30]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeConfigureRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeConfigureRequest) ProtoMessage() {}
+
+func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[30]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead.
+func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *VolumeConfigureRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
+	}
+	return 0
+}
+
+func (x *VolumeConfigureRequest) GetReplication() string {
+	if x != nil {
+		return x.Replication
+	}
+	return ""
+}
+
+type VolumeConfigureResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *VolumeConfigureResponse) Reset() {
+	*x = VolumeConfigureResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[31]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeConfigureResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeConfigureResponse) ProtoMessage() {}
+
+func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[31]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead.
+func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *VolumeConfigureResponse) GetError() string {
+	if x != nil {
+		return x.Error
+	}
+	return ""
+}
+
+type VolumeStatusRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+}
+
+func (x *VolumeStatusRequest) Reset() {
+	*x = VolumeStatusRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[32]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeStatusRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeStatusRequest) ProtoMessage() {}
+
+func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[32]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeStatusRequest.ProtoReflect.Descriptor instead.
+func (*VolumeStatusRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *VolumeStatusRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
+	}
+	return 0
+}
+
+type VolumeStatusResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"`
+}
+
+func (x *VolumeStatusResponse) Reset() {
+	*x = VolumeStatusResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[33]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeStatusResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VolumeStatusResponse) ProtoMessage() {}
+
+func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[33]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeStatusResponse.ProtoReflect.Descriptor instead.
+func (*VolumeStatusResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *VolumeStatusResponse) GetIsReadOnly() bool {
+	if x != nil {
+		return x.IsReadOnly
+	}
+	return false
+}
 
 type VolumeCopyRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-	Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
-	Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
-	Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"`
-	SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"`
+	Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"`
+	Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"`
+	SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"`
+	DiskType string `protobuf:"bytes,6,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+}
+
+func (x *VolumeCopyRequest) Reset() {
+	*x = VolumeCopyRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[34]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeCopyRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} }
-func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) }
-func (*VolumeCopyRequest) ProtoMessage() {}
-func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (*VolumeCopyRequest) ProtoMessage() {}
 
-func (m *VolumeCopyRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[34]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead.
+func (*VolumeCopyRequest) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{34}
+}
+
+func (x *VolumeCopyRequest) GetVolumeId() uint32 {
+	if x != nil {
+		return x.VolumeId
 	}
 	return 0
 }
 
-func (m *VolumeCopyRequest) GetCollection() string {
-	if m != nil {
-		return m.Collection
+func (x *VolumeCopyRequest) GetCollection() string {
+	if x != nil {
+		return x.Collection
+	}
+	return ""
+}
+
+func (x *VolumeCopyRequest) GetReplication() string {
+	if x != nil {
+		return x.Replication
 	}
 	return ""
 }
 
-func (m *VolumeCopyRequest) GetReplication() string {
-	if m != nil {
-		return m.Replication
+func (x *VolumeCopyRequest) GetTtl() string {
+	if x != nil {
+		return x.Ttl
 	}
 	return ""
 }
 
-func (m *VolumeCopyRequest) GetTtl() string {
-	if m != nil {
-		return m.Ttl
+func (x *VolumeCopyRequest) GetSourceDataNode() string {
+	if x != nil {
+		return x.SourceDataNode
 	}
 	return ""
 }
 
-func (m *VolumeCopyRequest) GetSourceDataNode() string {
-	if m != nil {
-		return m.SourceDataNode
+func (x *VolumeCopyRequest) GetDiskType() string {
+	if x != nil {
+		return x.DiskType
 	}
 	return ""
 }
 
 type VolumeCopyResponse struct {
-	LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs" json:"last_append_at_ns,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"`
+}
+
+func (x *VolumeCopyResponse) Reset() {
+	*x = VolumeCopyResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[35]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *VolumeCopyResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} }
-func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) }
-func (*VolumeCopyResponse) ProtoMessage() {}
-func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*VolumeCopyResponse) ProtoMessage() {}
 
-func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 {
-	if m != nil {
-		return m.LastAppendAtNs
+func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[35]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead.
+func (*VolumeCopyResponse) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 {
+	if x != nil {
+		return x.LastAppendAtNs
 	}
 	return 0
 }
 
 type CopyFileRequest struct {
-	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-	Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"`
-	CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"`
-	StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"`
-	Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"`
-	IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"`
-	IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound" json:"ignore_source_file_not_found,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"`
+	CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"`
+	StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"`
+	Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"`
+	IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"`
+	IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"`
+}
+
+func (x *CopyFileRequest) Reset() {
+	*x = CopyFileRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[36]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CopyFileRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} }
-func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) }
-func (*CopyFileRequest) ProtoMessage() {}
-func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+func (*CopyFileRequest) ProtoMessage() {}
 
-func (m *CopyFileRequest) GetVolumeId() uint32 {
-	if m != nil {
-		return m.VolumeId
+func (x *CopyFileRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[36]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead.
+func (*CopyFileRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{36} +} + +func (x *CopyFileRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *CopyFileRequest) GetExt() string { - if m != nil { - return m.Ext +func (x *CopyFileRequest) GetExt() string { + if x != nil { + return x.Ext } return "" } -func (m *CopyFileRequest) GetCompactionRevision() uint32 { - if m != nil { - return m.CompactionRevision +func (x *CopyFileRequest) GetCompactionRevision() uint32 { + if x != nil { + return x.CompactionRevision } return 0 } -func (m *CopyFileRequest) GetStopOffset() uint64 { - if m != nil { - return m.StopOffset +func (x *CopyFileRequest) GetStopOffset() uint64 { + if x != nil { + return x.StopOffset } return 0 } -func (m *CopyFileRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *CopyFileRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *CopyFileRequest) GetIsEcVolume() bool { - if m != nil { - return m.IsEcVolume +func (x *CopyFileRequest) GetIsEcVolume() bool { + if x != nil { + return x.IsEcVolume } return false } -func (m *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { - if m != nil { - return m.IgnoreSourceFileNotFound +func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { + if x != nil { + return x.IgnoreSourceFileNotFound } return false } type CopyFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } -func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } -func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } -func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (x *CopyFileResponse) Reset() { + *x = CopyFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyFileResponse) ProtoMessage() {} + +func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead. 
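The CopyFileRequest/CopyFileResponse pair above moves volume file data in chunked FileContent messages. A minimal client sketch, assuming CopyFile is the server-streaming RPC these types serve on the generated VolumeServerClient; the address, volume id, and collection are hypothetical:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical volume server gRPC address; adjust for a real deployment.
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := volume_server_pb.NewVolumeServerClient(conn)

	// Assumption: CopyFile streams the named volume file back in chunks.
	stream, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
		VolumeId:   7,         // hypothetical volume id
		Ext:        ".dat",    // which volume file to copy
		Collection: "example", // hypothetical collection
	})
	if err != nil {
		log.Fatal(err)
	}
	total := 0
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		total += len(resp.GetFileContent())
	}
	fmt.Println("copied bytes:", total)
}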
+func (*CopyFileResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{37} +} + +func (x *CopyFileResponse) GetFileContent() []byte { + if x != nil { + return x.FileContent + } + return nil +} + +type ReadNeedleBlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset + Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ReadNeedleBlobRequest) Reset() { + *x = ReadNeedleBlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadNeedleBlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadNeedleBlobRequest) ProtoMessage() {} + +func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead. +func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{38} +} + +func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +type ReadNeedleBlobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` +} + +func (x *ReadNeedleBlobResponse) Reset() { + *x = ReadNeedleBlobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadNeedleBlobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadNeedleBlobResponse) ProtoMessage() {} + +func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead. 
+func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{39} +} + +func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { + if x != nil { + return x.NeedleBlob + } + return nil +} + +type WriteNeedleBlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` +} + +func (x *WriteNeedleBlobRequest) Reset() { + *x = WriteNeedleBlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteNeedleBlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteNeedleBlobRequest) ProtoMessage() {} + +func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead. +func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{40} +} + +func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} -func (m *CopyFileResponse) GetFileContent() []byte { - if m != nil { - return m.FileContent +func (x *WriteNeedleBlobRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetNeedleBlob() []byte { + if x != nil { + return x.NeedleBlob } return nil } +type WriteNeedleBlobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *WriteNeedleBlobResponse) Reset() { + *x = WriteNeedleBlobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteNeedleBlobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteNeedleBlobResponse) ProtoMessage() {} + +func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead. 
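Note the receiver nil-checks in the regenerated getters above: calling a getter on a nil message returns the field's zero value instead of panicking. A self-contained illustration (field values are hypothetical):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// Every generated getter checks `if x != nil` first, so a nil message
	// yields zero values rather than a nil-pointer panic.
	var nilReq *volume_server_pb.ReadNeedleBlobRequest
	fmt.Println(nilReq.GetVolumeId(), nilReq.GetSize()) // 0 0

	// A populated request; Offset is the needle's actual offset in the volume file.
	req := &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: 3,      // hypothetical
		NeedleId: 0xbeef, // hypothetical
		Offset:   1024,
		Size:     512,
	}
	fmt.Println(req.GetOffset(), req.GetSize()) // 1024 512
}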
+func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{41} +} + type VolumeTailSenderRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` - IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` } -func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } -func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTailSenderRequest) ProtoMessage() {} -func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (x *VolumeTailSenderRequest) Reset() { + *x = VolumeTailSenderRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailSenderRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailSenderRequest) ProtoMessage() {} -func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{42} +} + +func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeTailSenderRequest) GetSinceNs() uint64 { - if m != nil { - return m.SinceNs +func (x *VolumeTailSenderRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs } return 0 } -func (m *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { - if m != nil { - return m.IdleTimeoutSeconds +func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { + if x != nil { + return x.IdleTimeoutSeconds } return 0 } type VolumeTailSenderResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` - IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk" json:"is_last_chunk,omitempty"` + IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"` } -func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } -func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTailSenderResponse) ProtoMessage() {} -func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (x *VolumeTailSenderResponse) Reset() { + *x = VolumeTailSenderResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailSenderResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailSenderResponse) ProtoMessage() {} -func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { - if m != nil { - return m.NeedleHeader +func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{43} +} + +func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { + if x != nil { + return x.NeedleHeader } return nil } -func (m *VolumeTailSenderResponse) GetNeedleBody() []byte { - if m != nil { - return m.NeedleBody +func (x *VolumeTailSenderResponse) GetNeedleBody() []byte { + if x != nil { + return x.NeedleBody } return nil } -func (m *VolumeTailSenderResponse) GetIsLastChunk() bool { - if m != nil { - return m.IsLastChunk +func (x *VolumeTailSenderResponse) GetIsLastChunk() bool { + if x != nil { + return x.IsLastChunk } return false } type VolumeTailReceiverRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` - IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"` - SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer" json:"source_volume_server,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` + SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"` } -func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } -func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (x *VolumeTailReceiverRequest) Reset() { + *x = VolumeTailReceiverRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailReceiverRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{44} +} + +func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeTailReceiverRequest) GetSinceNs() uint64 { - if m != nil { - return m.SinceNs +func (x *VolumeTailReceiverRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs } return 0 } -func (m *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { - if m != nil { - return m.IdleTimeoutSeconds +func (x *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { + if x != nil { + return x.IdleTimeoutSeconds } return 0 } -func (m *VolumeTailReceiverRequest) GetSourceVolumeServer() string { - if m != nil { - return m.SourceVolumeServer +func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string { + if x != nil { + return x.SourceVolumeServer } return "" } type VolumeTailReceiverResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } -func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTailReceiverResponse) ProtoMessage() {} -func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (x *VolumeTailReceiverResponse) Reset() { + *x = VolumeTailReceiverResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailReceiverResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailReceiverResponse) ProtoMessage() {} + +func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. 
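Although the tail messages are now backed by the google.golang.org/protobuf runtime, the proto3 wire format is unchanged, so old and new binaries interoperate. A round-trip sketch using VolumeTailSenderRequest (values hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/protobuf/proto"
)

func main() {
	req := &volume_server_pb.VolumeTailSenderRequest{
		VolumeId:           3, // hypothetical
		SinceNs:            1619424000000000000,
		IdleTimeoutSeconds: 10,
	}

	// Marshal with the new runtime; the bytes match what the old
	// github.com/golang/protobuf generated code would have produced.
	b, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}

	var decoded volume_server_pb.VolumeTailSenderRequest
	if err := proto.Unmarshal(b, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetVolumeId(), decoded.GetSinceNs())
}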
+func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{45} +} type VolumeEcShardsGenerateRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } -func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } -func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +func (x *VolumeEcShardsGenerateRequest) Reset() { + *x = VolumeEcShardsGenerateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsGenerateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{46} +} + +func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsGenerateRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardsGenerateRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type VolumeEcShardsGenerateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsGenerateResponse) Reset() { + *x = VolumeEcShardsGenerateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsGenerateResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} + +func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{47} +} type VolumeEcShardsRebuildRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } -func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } -func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} -func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (x *VolumeEcShardsRebuildRequest) Reset() { + *x = VolumeEcShardsRebuildRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsRebuildRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} + +func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{48} +} + +func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsRebuildRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardsRebuildRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type VolumeEcShardsRebuildResponse struct { - RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds" json:"rebuilt_shard_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"` } -func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } -func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} -func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (x *VolumeEcShardsRebuildResponse) Reset() { + *x = VolumeEcShardsRebuildResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsRebuildResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} + +func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { - if m != nil { - return m.RebuiltShardIds +// Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{49} +} + +func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { + if x != nil { + return x.RebuiltShardIds } return nil } type VolumeEcShardsCopyRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` - CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile" json:"copy_ecx_file,omitempty"` - SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` - CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile" json:"copy_ecj_file,omitempty"` - CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile" json:"copy_vif_file,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` + CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` + CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"` + CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"` } -func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } -func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (x *VolumeEcShardsCopyRequest) Reset() { + *x = VolumeEcShardsCopyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{50} +} + +func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsCopyRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardsCopyRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds } return nil } -func (m *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { - if m != nil { - return m.CopyEcxFile +func (x *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { + if x != nil { + return x.CopyEcxFile } return false } -func (m *VolumeEcShardsCopyRequest) GetSourceDataNode() string { - if m != nil { - return m.SourceDataNode +func (x *VolumeEcShardsCopyRequest) GetSourceDataNode() string { + if x != nil { + return x.SourceDataNode } return "" } -func (m *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { - if m != nil { - return m.CopyEcjFile +func (x *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { + if x != nil { + return x.CopyEcjFile } return false } -func (m *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { - if m != nil { - return m.CopyVifFile +func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { + if x != nil { + return x.CopyVifFile } return false } type VolumeEcShardsCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsCopyResponse) Reset() { + *x = VolumeEcShardsCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } -func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsCopyResponse) ProtoMessage() {} -func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*VolumeEcShardsCopyResponse) ProtoMessage() {} + +func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{51} +} type VolumeEcShardsDeleteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } -func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } -func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} -func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (x *VolumeEcShardsDeleteRequest) Reset() { + *x = VolumeEcShardsDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} + +func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{52} +} + +func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsDeleteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardsDeleteRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds } return nil } type VolumeEcShardsDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } -func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} -func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (x *VolumeEcShardsDeleteResponse) Reset() { + *x = VolumeEcShardsDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} + +func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{53} +} type VolumeEcShardsMountRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsMountRequest) Reset() { + *x = VolumeEcShardsMountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsMountRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } -func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{54} +} + +func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsMountRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardsMountRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcShardsMountRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds } return nil } type VolumeEcShardsMountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsMountResponse) Reset() { + *x = VolumeEcShardsMountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsMountResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } -func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsMountResponse) ProtoMessage() {} -func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*VolumeEcShardsMountResponse) ProtoMessage() {} + +func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{55} +} type VolumeEcShardsUnmountRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsUnmountRequest) Reset() { + *x = VolumeEcShardsUnmountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsUnmountRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } -func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{56} +} + +func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds } return nil } type VolumeEcShardsUnmountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsUnmountResponse) Reset() { + *x = VolumeEcShardsUnmountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } -func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} -func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (x *VolumeEcShardsUnmountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} + +func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{57} +} type VolumeEcShardReadRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"` - Offset int64 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` - Size int64 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` } -func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } -func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardReadRequest) ProtoMessage() {} -func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (x *VolumeEcShardReadRequest) Reset() { + *x = VolumeEcShardReadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcShardReadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardReadRequest) ProtoMessage() {} + +func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{58} +} + +func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardReadRequest) GetShardId() uint32 { - if m != nil { - return m.ShardId +func (x *VolumeEcShardReadRequest) GetShardId() uint32 { + if x != nil { + return x.ShardId } return 0 } -func (m *VolumeEcShardReadRequest) GetOffset() int64 { - if m != nil { - return m.Offset +func (x *VolumeEcShardReadRequest) GetOffset() int64 { + if x != nil { + return x.Offset } return 0 } -func (m *VolumeEcShardReadRequest) GetSize() int64 { - if m != nil { - return m.Size +func (x *VolumeEcShardReadRequest) GetSize() int64 { + if x != nil { + return x.Size } return 0 } -func (m *VolumeEcShardReadRequest) GetFileKey() uint64 { - if m != nil { - return m.FileKey +func (x *VolumeEcShardReadRequest) GetFileKey() uint64 { + if x != nil { + return x.FileKey } return 0 } type VolumeEcShardReadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted" json:"is_deleted,omitempty"` + IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"` +} + +func (x *VolumeEcShardReadResponse) Reset() { + *x = VolumeEcShardReadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } -func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (x *VolumeEcShardReadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (m *VolumeEcShardReadResponse) GetData() []byte { - if m != nil { - return m.Data +func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. 
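The ProtoReflect methods introduced by this regeneration expose each message to the reflection API. A short sketch that ranges over the populated fields of a VolumeEcShardReadRequest (values hypothetical):

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	req := &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: 3, // hypothetical
		ShardId:  2,
		Size:     4096,
	}

	// Range visits only the fields that are set (non-zero for proto3 scalars),
	// here volume_id, shard_id, and size.
	req.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Printf("%s = %v\n", fd.Name(), v)
		return true
	})
}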
+func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{59} +} + +func (x *VolumeEcShardReadResponse) GetData() []byte { + if x != nil { + return x.Data } return nil } -func (m *VolumeEcShardReadResponse) GetIsDeleted() bool { - if m != nil { - return m.IsDeleted +func (x *VolumeEcShardReadResponse) GetIsDeleted() bool { + if x != nil { + return x.IsDeleted } return false } type VolumeEcBlobDeleteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` - Version uint32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` } -func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } -func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} -func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (x *VolumeEcBlobDeleteRequest) Reset() { + *x = VolumeEcBlobDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcBlobDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} + +func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{60} +} + +func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcBlobDeleteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcBlobDeleteRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { - if m != nil { - return m.FileKey +func (x *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { + if x != nil { + return x.FileKey } return 0 } -func (m *VolumeEcBlobDeleteRequest) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } type VolumeEcBlobDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } -func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} -func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (x *VolumeEcBlobDeleteResponse) Reset() { + *x = VolumeEcBlobDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcBlobDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} + +func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{61} +} type VolumeEcShardsToVolumeRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } -func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} } -func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} -func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (x *VolumeEcShardsToVolumeRequest) Reset() { + *x = VolumeEcShardsToVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeEcShardsToVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} + +func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{62} +} + +func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardsToVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type VolumeEcShardsToVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsToVolumeResponse) Reset() { + *x = VolumeEcShardsToVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsToVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} + +func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +// Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{63} +} type ReadVolumeFileStatusRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } -func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } -func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (x *ReadVolumeFileStatusRequest) Reset() { + *x = ReadVolumeFileStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVolumeFileStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (*ReadVolumeFileStatusRequest) ProtoMessage() {} + +func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{64} +} + +func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } type ReadVolumeFileStatusResponse struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds" json:"idx_file_timestamp_seconds,omitempty"` - IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` - DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds" json:"dat_file_timestamp_seconds,omitempty"` - DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize" json:"dat_file_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` - CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` - Collection string `protobuf:"bytes,8,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"` + IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` + 
DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"` + DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"` + DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } -func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } -func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } -func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +func (x *ReadVolumeFileStatusResponse) Reset() { + *x = ReadVolumeFileStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVolumeFileStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (*ReadVolumeFileStatusResponse) ProtoMessage() {} + +func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. 
+func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{65} +} + +func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { - if m != nil { - return m.IdxFileTimestampSeconds +func (x *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { + if x != nil { + return x.IdxFileTimestampSeconds } return 0 } -func (m *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { - if m != nil { - return m.IdxFileSize +func (x *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { + if x != nil { + return x.IdxFileSize } return 0 } -func (m *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { - if m != nil { - return m.DatFileTimestampSeconds +func (x *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { + if x != nil { + return x.DatFileTimestampSeconds } return 0 } -func (m *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { - if m != nil { - return m.DatFileSize +func (x *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { + if x != nil { + return x.DatFileSize } return 0 } -func (m *ReadVolumeFileStatusResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *ReadVolumeFileStatusResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount } return 0 } -func (m *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { - if m != nil { - return m.CompactionRevision +func (x *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { + if x != nil { + return x.CompactionRevision } return 0 } -func (m *ReadVolumeFileStatusResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *ReadVolumeFileStatusResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *ReadVolumeFileStatusResponse) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } type DiskStatus struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` + DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *DiskStatus) Reset() { + *x = DiskStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiskStatus) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DiskStatus) Reset() { *m = DiskStatus{} } -func (m *DiskStatus) String() string { return proto.CompactTextString(m) } -func 
(*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*DiskStatus) ProtoMessage() {} + +func (x *DiskStatus) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. +func (*DiskStatus) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{66} +} -func (m *DiskStatus) GetDir() string { - if m != nil { - return m.Dir +func (x *DiskStatus) GetDir() string { + if x != nil { + return x.Dir } return "" } -func (m *DiskStatus) GetAll() uint64 { - if m != nil { - return m.All +func (x *DiskStatus) GetAll() uint64 { + if x != nil { + return x.All } return 0 } -func (m *DiskStatus) GetUsed() uint64 { - if m != nil { - return m.Used +func (x *DiskStatus) GetUsed() uint64 { + if x != nil { + return x.Used } return 0 } -func (m *DiskStatus) GetFree() uint64 { - if m != nil { - return m.Free +func (x *DiskStatus) GetFree() uint64 { + if x != nil { + return x.Free } return 0 } +func (x *DiskStatus) GetPercentFree() float32 { + if x != nil { + return x.PercentFree + } + return 0 +} + +func (x *DiskStatus) GetPercentUsed() float32 { + if x != nil { + return x.PercentUsed + } + return 0 +} + +func (x *DiskStatus) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + type MemStatus struct { - Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` - Self uint64 `protobuf:"varint,5,opt,name=self" json:"self,omitempty"` - Heap uint64 `protobuf:"varint,6,opt,name=heap" json:"heap,omitempty"` - Stack uint64 `protobuf:"varint,7,opt,name=stack" json:"stack,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"` + Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"` + Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"` +} + +func (x *MemStatus) Reset() { + *x = MemStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MemStatus) Reset() { *m = MemStatus{} } -func (m *MemStatus) String() string { return proto.CompactTextString(m) } -func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (x *MemStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *MemStatus) GetGoroutines() int32 { - if m != nil { - return m.Goroutines +func (*MemStatus) ProtoMessage() {} + +func (x *MemStatus) ProtoReflect() protoreflect.Message { 
+	mi := &file_volume_server_proto_msgTypes[67]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemStatus.ProtoReflect.Descriptor instead.
+func (*MemStatus) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{67}
+}
+
+func (x *MemStatus) GetGoroutines() int32 {
+	if x != nil {
+		return x.Goroutines
 	}
 	return 0
 }
 
-func (m *MemStatus) GetAll() uint64 {
-	if m != nil {
-		return m.All
+func (x *MemStatus) GetAll() uint64 {
+	if x != nil {
+		return x.All
 	}
 	return 0
 }
 
-func (m *MemStatus) GetUsed() uint64 {
-	if m != nil {
-		return m.Used
+func (x *MemStatus) GetUsed() uint64 {
+	if x != nil {
+		return x.Used
 	}
 	return 0
 }
 
-func (m *MemStatus) GetFree() uint64 {
-	if m != nil {
-		return m.Free
+func (x *MemStatus) GetFree() uint64 {
+	if x != nil {
+		return x.Free
 	}
 	return 0
 }
 
-func (m *MemStatus) GetSelf() uint64 {
-	if m != nil {
-		return m.Self
+func (x *MemStatus) GetSelf() uint64 {
+	if x != nil {
+		return x.Self
 	}
 	return 0
 }
 
-func (m *MemStatus) GetHeap() uint64 {
-	if m != nil {
-		return m.Heap
+func (x *MemStatus) GetHeap() uint64 {
+	if x != nil {
+		return x.Heap
 	}
 	return 0
 }
 
-func (m *MemStatus) GetStack() uint64 {
-	if m != nil {
-		return m.Stack
+func (x *MemStatus) GetStack() uint64 {
+	if x != nil {
+		return x.Stack
 	}
 	return 0
 }
 
 // tiered storage on volume servers
 type RemoteFile struct {
-	BackendType  string `protobuf:"bytes,1,opt,name=backend_type,json=backendType" json:"backend_type,omitempty"`
-	BackendId    string `protobuf:"bytes,2,opt,name=backend_id,json=backendId" json:"backend_id,omitempty"`
-	Key          string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"`
-	Offset       uint64 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"`
-	FileSize     uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
-	ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime" json:"modified_time,omitempty"`
-	Extension    string `protobuf:"bytes,7,opt,name=extension" json:"extension,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	BackendType  string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"`
+	BackendId    string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"`
+	Key          string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+	Offset       uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	FileSize     uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+	ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"`
+	Extension    string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"`
+}
+
+func (x *RemoteFile) Reset() {
+	*x = RemoteFile{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[68]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RemoteFile) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoteFile) ProtoMessage() {}
+
+func (x *RemoteFile) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[68]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *RemoteFile) Reset() { *m = RemoteFile{} } -func (m *RemoteFile) String() string { return proto.CompactTextString(m) } -func (*RemoteFile) ProtoMessage() {} -func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +// Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead. +func (*RemoteFile) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68} +} -func (m *RemoteFile) GetBackendType() string { - if m != nil { - return m.BackendType +func (x *RemoteFile) GetBackendType() string { + if x != nil { + return x.BackendType } return "" } -func (m *RemoteFile) GetBackendId() string { - if m != nil { - return m.BackendId +func (x *RemoteFile) GetBackendId() string { + if x != nil { + return x.BackendId } return "" } -func (m *RemoteFile) GetKey() string { - if m != nil { - return m.Key +func (x *RemoteFile) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *RemoteFile) GetOffset() uint64 { - if m != nil { - return m.Offset +func (x *RemoteFile) GetOffset() uint64 { + if x != nil { + return x.Offset } return 0 } -func (m *RemoteFile) GetFileSize() uint64 { - if m != nil { - return m.FileSize +func (x *RemoteFile) GetFileSize() uint64 { + if x != nil { + return x.FileSize } return 0 } -func (m *RemoteFile) GetModifiedTime() uint64 { - if m != nil { - return m.ModifiedTime +func (x *RemoteFile) GetModifiedTime() uint64 { + if x != nil { + return x.ModifiedTime } return 0 } -func (m *RemoteFile) GetExtension() string { - if m != nil { - return m.Extension +func (x *RemoteFile) GetExtension() string { + if x != nil { + return x.Extension } return "" } type VolumeInfo struct { - Files []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` - Version uint32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` +} + +func (x *VolumeInfo) Reset() { + *x = VolumeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeInfo) ProtoMessage() {} + +func (x *VolumeInfo) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } -func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } -func (*VolumeInfo) ProtoMessage() {} -func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +// Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. 
+func (*VolumeInfo) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{69} +} -func (m *VolumeInfo) GetFiles() []*RemoteFile { - if m != nil { - return m.Files +func (x *VolumeInfo) GetFiles() []*RemoteFile { + if x != nil { + return x.Files } return nil } -func (m *VolumeInfo) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeInfo) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } +func (x *VolumeInfo) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + type VolumeTierMoveDatToRemoteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName" json:"destination_backend_name,omitempty"` - KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile" json:"keep_local_dat_file,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"` + KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"` +} + +func (x *VolumeTierMoveDatToRemoteRequest) Reset() { + *x = VolumeTierMoveDatToRemoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatToRemoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} + +func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMoveDatToRemoteRequest{} } -func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} +// Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{60} + return file_volume_server_proto_rawDescGZIP(), []int{70} } -func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeTierMoveDatToRemoteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeTierMoveDatToRemoteRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { - if m != nil { - return m.DestinationBackendName +func (x *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { + if x != nil { + return x.DestinationBackendName } return "" } -func (m *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { - if m != nil { - return m.KeepLocalDatFile +func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { + if x != nil { + return x.KeepLocalDatFile } return false } type VolumeTierMoveDatToRemoteResponse struct { - Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"` - ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` +} + +func (x *VolumeTierMoveDatToRemoteResponse) Reset() { + *x = VolumeTierMoveDatToRemoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatToRemoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMoveDatToRemoteResponse{} } -func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} +func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} + +func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{61} + return file_volume_server_proto_rawDescGZIP(), []int{71} } -func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { - if m != nil { - return m.Processed +func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { + if x != nil { + return x.Processed } return 0 } -func (m *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { - if m != nil { - return m.ProcessedPercentage +func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { + if x != nil { + return x.ProcessedPercentage } return 0 } type VolumeTierMoveDatFromRemoteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile" json:"keep_remote_dat_file,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"` } -func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMoveDatFromRemoteRequest{} } -func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} +func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { + *x = VolumeTierMoveDatFromRemoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatFromRemoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} + +func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{62} + return file_volume_server_proto_rawDescGZIP(), []int{72} } -func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { - if m != nil { - return m.KeepRemoteDatFile +func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { + if x != nil { + return x.KeepRemoteDatFile } return false } type VolumeTierMoveDatFromRemoteResponse struct { - Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"` - ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` +} + +func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { + *x = VolumeTierMoveDatFromRemoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatFromRemoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} + +func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierMoveDatFromRemoteResponse{} } -func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} +// Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{63} + return file_volume_server_proto_rawDescGZIP(), []int{73} } -func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { - if m != nil { - return m.Processed +func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { + if x != nil { + return x.Processed } return 0 } -func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { - if m != nil { - return m.ProcessedPercentage +func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { + if x != nil { + return x.ProcessedPercentage } return 0 } +type VolumeServerStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeServerStatusRequest) Reset() { + *x = VolumeServerStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeServerStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerStatusRequest) ProtoMessage() {} + +func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. +func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{74} +} + +type VolumeServerStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"` + MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"` +} + +func (x *VolumeServerStatusResponse) Reset() { + *x = VolumeServerStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeServerStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerStatusResponse) ProtoMessage() {} + +func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. 
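// Editor's note: a minimal usage sketch, not part of the generated file. It
// assumes the generated VolumeServerClient in this package and a volume server
// whose gRPC endpoint listens on localhost:18080 (by convention the HTTP port
// plus 10000; adjust for your deployment). It shows why the regenerated
// nil-safe getters matter: chained reads need no intermediate nil checks.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)
	resp, err := client.VolumeServerStatus(context.Background(), &volume_server_pb.VolumeServerStatusRequest{})
	if err != nil {
		log.Fatal(err)
	}
	// Generated getters return zero values on nil receivers, so
	// resp.GetMemoryStatus().GetGoroutines() is safe even if the field is unset.
	for _, disk := range resp.GetDiskStatuses() {
		fmt.Printf("dir=%s type=%s free=%.1f%%\n", disk.GetDir(), disk.GetDiskType(), disk.GetPercentFree())
	}
	fmt.Println("goroutines:", resp.GetMemoryStatus().GetGoroutines())
}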
+func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{75} +} + +func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { + if x != nil { + return x.DiskStatuses + } + return nil +} + +func (x *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus { + if x != nil { + return x.MemoryStatus + } + return nil +} + +type VolumeServerLeaveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeServerLeaveRequest) Reset() { + *x = VolumeServerLeaveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeServerLeaveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerLeaveRequest) ProtoMessage() {} + +func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. +func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{76} +} + +type VolumeServerLeaveResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeServerLeaveResponse) Reset() { + *x = VolumeServerLeaveResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeServerLeaveResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerLeaveResponse) ProtoMessage() {} + +func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{77} +} + // select on volume servers type QueryRequest struct { - Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"` - FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds" json:"from_file_ids,omitempty"` - Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter" json:"filter,omitempty"` - InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization" json:"input_serialization,omitempty"` - OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization" json:"output_serialization,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"` + FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"` + Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` + InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"` + OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"` +} + +func (x *QueryRequest) Reset() { + *x = QueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest) ProtoMessage() {} + +func (x *QueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. 
+func (*QueryRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78} } -func (m *QueryRequest) Reset() { *m = QueryRequest{} } -func (m *QueryRequest) String() string { return proto.CompactTextString(m) } -func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } +func (x *QueryRequest) GetSelections() []string { + if x != nil { + return x.Selections + } + return nil +} -func (m *QueryRequest) GetSelections() []string { - if m != nil { - return m.Selections +func (x *QueryRequest) GetFromFileIds() []string { + if x != nil { + return x.FromFileIds } return nil } -func (m *QueryRequest) GetFromFileIds() []string { - if m != nil { - return m.FromFileIds +func (x *QueryRequest) GetFilter() *QueryRequest_Filter { + if x != nil { + return x.Filter } return nil } -func (m *QueryRequest) GetFilter() *QueryRequest_Filter { - if m != nil { - return m.Filter +func (x *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization { + if x != nil { + return x.InputSerialization } return nil } -func (m *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization { - if m != nil { - return m.InputSerialization +func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization { + if x != nil { + return x.OutputSerialization } return nil } -func (m *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization { - if m != nil { - return m.OutputSerialization +type QueriedStripe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` +} + +func (x *QueriedStripe) Reset() { + *x = QueriedStripe{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueriedStripe) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueriedStripe) ProtoMessage() {} + +func (x *QueriedStripe) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. 
+func (*QueriedStripe) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{79} +} + +func (x *QueriedStripe) GetRecords() []byte { + if x != nil { + return x.Records } return nil } +type VolumeNeedleStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` +} + +func (x *VolumeNeedleStatusRequest) Reset() { + *x = VolumeNeedleStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeNeedleStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeNeedleStatusRequest) ProtoMessage() {} + +func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[80] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. +func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{80} +} + +func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeNeedleStatusRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +type VolumeNeedleStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleId uint64 `protobuf:"varint,1,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Cookie uint32 `protobuf:"varint,2,opt,name=cookie,proto3" json:"cookie,omitempty"` + Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + Crc uint32 `protobuf:"varint,5,opt,name=crc,proto3" json:"crc,omitempty"` + Ttl string `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *VolumeNeedleStatusResponse) Reset() { + *x = VolumeNeedleStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeNeedleStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeNeedleStatusResponse) ProtoMessage() {} + +func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[81] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. 
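// Editor's note: a hedged sketch, not part of the generated file, reusing the
// hypothetical client and imports from the earlier VolumeServerStatus example.
// It assumes a unary VolumeNeedleStatus RPC on the generated client; the
// volume and needle ids below are placeholders.
status, err := client.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
	VolumeId: 3,          // placeholder volume id
	NeedleId: 0x0163703d, // placeholder needle key
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("cookie=%x size=%d crc=%x ttl=%q\n",
	status.GetCookie(), status.GetSize(), status.GetCrc(), status.GetTtl())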
+func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{81} +} + +func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +func (x *VolumeNeedleStatusResponse) GetCookie() uint32 { + if x != nil { + return x.Cookie + } + return 0 +} + +func (x *VolumeNeedleStatusResponse) GetSize() uint32 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *VolumeNeedleStatusResponse) GetLastModified() uint64 { + if x != nil { + return x.LastModified + } + return 0 +} + +func (x *VolumeNeedleStatusResponse) GetCrc() uint32 { + if x != nil { + return x.Crc + } + return 0 +} + +func (x *VolumeNeedleStatusResponse) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + type QueryRequest_Filter struct { - Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` - Operand string `protobuf:"bytes,2,opt,name=operand" json:"operand,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } -func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } -func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64, 0} } +func (x *QueryRequest_Filter) Reset() { + *x = QueryRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *QueryRequest_Filter) GetField() string { - if m != nil { - return m.Field +func (x *QueryRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_Filter) ProtoMessage() {} + +func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[82] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 0} +} + +func (x *QueryRequest_Filter) GetField() string { + if x != nil { + return x.Field } return "" } -func (m *QueryRequest_Filter) GetOperand() string { - if m != nil { - return m.Operand +func (x *QueryRequest_Filter) GetOperand() string { + if x != nil { + return x.Operand } return "" } -func (m *QueryRequest_Filter) GetValue() string { - if m != nil { - return m.Value +func (x *QueryRequest_Filter) GetValue() string { + if x != nil { + return x.Value } return "" } type QueryRequest_InputSerialization struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // NONE | GZIP | BZIP2 - CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType" json:"compression_type,omitempty"` - CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput" json:"csv_input,omitempty"` - JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput" json:"json_input,omitempty"` - ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput" json:"parquet_input,omitempty"` + CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"` + CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"` + JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" json:"json_input,omitempty"` + ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"` +} + +func (x *QueryRequest_InputSerialization) Reset() { + *x = QueryRequest_InputSerialization{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_InputSerialization) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_InputSerialization) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[83] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_InputSerialization{} } -func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_InputSerialization) ProtoMessage() {} +// Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. 
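// Editor's note: a hypothetical construction, not part of the generated file,
// showing how the nested QueryRequest types in this file compose. The header
// and delimiter values follow the defaults documented in the CSVInput field
// comments; the file id is a placeholder.
req := &volume_server_pb.QueryRequest{
	Selections:  []string{"name", "size"},
	FromFileIds: []string{"3,0163703d"}, // placeholder fid
	Filter: &volume_server_pb.QueryRequest_Filter{
		Field:   "size",
		Operand: ">",
		Value:   "0",
	},
	InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
		CompressionType: "NONE", // NONE | GZIP | BZIP2
		CsvInput: &volume_server_pb.QueryRequest_InputSerialization_CSVInput{
			FileHeaderInfo:  "USE", // NONE | USE | IGNORE
			RecordDelimiter: "\n",
			FieldDelimiter:  ",",
		},
	},
}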
func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1} } -func (m *QueryRequest_InputSerialization) GetCompressionType() string { - if m != nil { - return m.CompressionType +func (x *QueryRequest_InputSerialization) GetCompressionType() string { + if x != nil { + return x.CompressionType } return "" } -func (m *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput { - if m != nil { - return m.CsvInput +func (x *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput { + if x != nil { + return x.CsvInput } return nil } -func (m *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput { - if m != nil { - return m.JsonInput +func (x *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput { + if x != nil { + return x.JsonInput } return nil } -func (m *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput { - if m != nil { - return m.ParquetInput +func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput { + if x != nil { + return x.ParquetInput + } + return nil +} + +type QueryRequest_OutputSerialization struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"` + JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"` +} + +func (x *QueryRequest_OutputSerialization) Reset() { + *x = QueryRequest_OutputSerialization{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_OutputSerialization) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_OutputSerialization) ProtoMessage() {} + +func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[84] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 2} +} + +func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { + if x != nil { + return x.CsvOutput + } + return nil +} + +func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput { + if x != nil { + return x.JsonOutput } return nil } type QueryRequest_InputSerialization_CSVInput struct { - FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo" json:"file_header_info,omitempty"` - RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"` - FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter" json:"field_delimiter,omitempty"` - QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer" json:"quote_charactoer,omitempty"` - QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter" json:"quote_escape_character,omitempty"` - Comments string `protobuf:"bytes,6,opt,name=comments" json:"comments,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " + Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: # // If true, records might contain record delimiters within quote characters - AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter" json:"allow_quoted_record_delimiter,omitempty"` + AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // default False. 
+}
+
+func (x *QueryRequest_InputSerialization_CSVInput) Reset() {
+	*x = QueryRequest_InputSerialization_CSVInput{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[85]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) Reset() {
-	*m = QueryRequest_InputSerialization_CSVInput{}
+func (x *QueryRequest_InputSerialization_CSVInput) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
+
+func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[85]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{64, 1, 0}
+	return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 0}
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
-	if m != nil {
-		return m.FileHeaderInfo
+func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
+	if x != nil {
+		return x.FileHeaderInfo
 	}
 	return ""
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string {
-	if m != nil {
-		return m.RecordDelimiter
+func (x *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string {
+	if x != nil {
+		return x.RecordDelimiter
 	}
 	return ""
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string {
-	if m != nil {
-		return m.FieldDelimiter
+func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string {
+	if x != nil {
+		return x.FieldDelimiter
 	}
 	return ""
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string {
-	if m != nil {
-		return m.QuoteCharactoer
+func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string {
+	if x != nil {
+		return x.QuoteCharactoer
 	}
 	return ""
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string {
-	if m != nil {
-		return m.QuoteEscapeCharacter
+func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string {
+	if x != nil {
+		return x.QuoteEscapeCharacter
 	}
 	return ""
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetComments() string {
-	if m != nil {
-		return m.Comments
+func (x *QueryRequest_InputSerialization_CSVInput) GetComments() string {
+	if x != nil {
+		return x.Comments
 	}
 	return ""
 }
 
-func (m *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool {
-	if m != nil {
-		return m.AllowQuotedRecordDelimiter
+func (x *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool {
+	if x != nil {
+		return x.AllowQuotedRecordDelimiter
 	}
 	return false
 }
 
 type QueryRequest_InputSerialization_JSONInput struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES
+}
+
+func (x *QueryRequest_InputSerialization_JSONInput) Reset() {
+	*x = QueryRequest_InputSerialization_JSONInput{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[86]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-func (m *QueryRequest_InputSerialization_JSONInput) Reset() {
-	*m = QueryRequest_InputSerialization_JSONInput{}
+func (x *QueryRequest_InputSerialization_JSONInput) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
+
+func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
+
+func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[86]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{64, 1, 1}
+	return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 1}
 }
 
-func (m *QueryRequest_InputSerialization_JSONInput) GetType() string {
-	if m != nil {
-		return m.Type
+func (x *QueryRequest_InputSerialization_JSONInput) GetType() string {
+	if x != nil {
+		return x.Type
 	}
 	return ""
 }
 
 type QueryRequest_InputSerialization_ParquetInput struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 }
 
-func (m *QueryRequest_InputSerialization_ParquetInput) Reset() {
-	*m = QueryRequest_InputSerialization_ParquetInput{}
-}
-func (m *QueryRequest_InputSerialization_ParquetInput) String() string {
-	return proto.CompactTextString(m)
-}
-func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
-func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{64, 1, 2}
+func (x *QueryRequest_InputSerialization_ParquetInput) Reset() {
+	*x = QueryRequest_InputSerialization_ParquetInput{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[87]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
-type QueryRequest_OutputSerialization struct {
-	CsvOutput  *QueryRequest_OutputSerialization_CSVOutput  `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput" json:"csv_output,omitempty"`
-	JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput" json:"json_output,omitempty"`
+func (x *QueryRequest_InputSerialization_ParquetInput) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
 
-func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_OutputSerialization{} }
-func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) }
-func (*QueryRequest_OutputSerialization) ProtoMessage() {}
-func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{64, 2}
-}
+func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
 
-func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
-	if m != nil {
-		return m.CsvOutput
+func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[87]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return nil
+	return mi.MessageOf(x)
 }
 
-func (m *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput {
-	if m != nil {
-		return m.JsonOutput
-	}
-	return nil
+// Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 2}
 }
 
 type QueryRequest_OutputSerialization_CSVOutput struct {
-	QuoteFields          string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields" json:"quote_fields,omitempty"`
-	RecordDelimiter      string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"`
-	FieldDelimiter       string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter" json:"field_delimiter,omitempty"`
-	QuoteCharactoer      string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer" json:"quote_charactoer,omitempty"`
-	QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter" json:"quote_escape_character,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	QuoteFields          string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED
+	RecordDelimiter      string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n
+	FieldDelimiter       string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: ,
+	QuoteCharactoer      string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: "
+	QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: "
 }
 
-func (m *QueryRequest_OutputSerialization_CSVOutput) Reset() {
-	*m = QueryRequest_OutputSerialization_CSVOutput{}
+func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() {
+	*x = QueryRequest_OutputSerialization_CSVOutput{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[88]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *QueryRequest_OutputSerialization_CSVOutput) String() string {
-	return proto.CompactTextString(m)
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
+
 func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {}
+
+func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[88]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead.
 func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{64, 2, 0}
+	return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 0}
 }
 
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
-	if m != nil {
-		return m.QuoteFields
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
+	if x != nil {
+		return x.QuoteFields
 	}
 	return ""
 }
 
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string {
-	if m != nil {
-		return m.RecordDelimiter
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string {
+	if x != nil {
+		return x.RecordDelimiter
 	}
 	return ""
 }
 
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string {
-	if m != nil {
-		return m.FieldDelimiter
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string {
+	if x != nil {
+		return x.FieldDelimiter
 	}
 	return ""
 }
 
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string {
-	if m != nil {
-		return m.QuoteCharactoer
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string {
+	if x != nil {
+		return x.QuoteCharactoer
 	}
 	return ""
 }
 
-func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string {
-	if m != nil {
-		return m.QuoteEscapeCharacter
+func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string {
+	if x != nil {
+		return x.QuoteEscapeCharacter
 	}
 	return ""
 }
 
 type QueryRequest_OutputSerialization_JSONOutput struct {
-	RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"`
 }
 
-func (m *QueryRequest_OutputSerialization_JSONOutput) Reset() {
-	*m = QueryRequest_OutputSerialization_JSONOutput{}
+func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() {
+	*x = QueryRequest_OutputSerialization_JSONOutput{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_volume_server_proto_msgTypes[89]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func (m *QueryRequest_OutputSerialization_JSONOutput) String() string {
-	return proto.CompactTextString(m)
+
+func (x *QueryRequest_OutputSerialization_JSONOutput) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
+
 func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {}
-func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{64, 2, 1}
-}
 
-func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
-	if m != nil {
-		return m.RecordDelimiter
+func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message {
+	mi := &file_volume_server_proto_msgTypes[89]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return ""
+	return mi.MessageOf(x)
 }
 
-type QueriedStripe struct {
-	Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"`
+// Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead.
+func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
+	return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 1}
 }
 
-func (m *QueriedStripe) Reset() { *m = QueriedStripe{} }
-func (m *QueriedStripe) String() string { return proto.CompactTextString(m) }
-func (*QueriedStripe) ProtoMessage() {}
-func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} }
-
-func (m *QueriedStripe) GetRecords() []byte {
-	if m != nil {
-		return m.Records
+func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
+	if x != nil {
+		return x.RecordDelimiter
 	}
-	return nil
+	return ""
 }
 
-func init() {
-	proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest")
-	proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse")
-	proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult")
-	proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty")
-	proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest")
-	proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse")
-	proto.RegisterType((*VacuumVolumeCompactRequest)(nil), "volume_server_pb.VacuumVolumeCompactRequest")
-	proto.RegisterType((*VacuumVolumeCompactResponse)(nil), "volume_server_pb.VacuumVolumeCompactResponse")
-	proto.RegisterType((*VacuumVolumeCommitRequest)(nil), "volume_server_pb.VacuumVolumeCommitRequest")
-	proto.RegisterType((*VacuumVolumeCommitResponse)(nil), "volume_server_pb.VacuumVolumeCommitResponse")
-	proto.RegisterType((*VacuumVolumeCleanupRequest)(nil), "volume_server_pb.VacuumVolumeCleanupRequest")
-	proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse")
-	proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest")
-	proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse")
-	proto.RegisterType((*AllocateVolumeRequest)(nil), "volume_server_pb.AllocateVolumeRequest")
-	proto.RegisterType((*AllocateVolumeResponse)(nil), "volume_server_pb.AllocateVolumeResponse")
-	proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest")
-	proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse")
-	proto.RegisterType((*VolumeIncrementalCopyRequest)(nil), "volume_server_pb.VolumeIncrementalCopyRequest")
-	proto.RegisterType((*VolumeIncrementalCopyResponse)(nil), "volume_server_pb.VolumeIncrementalCopyResponse")
-	proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest")
-	proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse")
-	proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest")
-	proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse")
-	proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest")
-	proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse")
-	proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest")
-	proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse")
-	proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest")
-	proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse")
-	proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest")
-	proto.RegisterType((*CopyFileResponse)(nil), "volume_server_pb.CopyFileResponse")
-	proto.RegisterType((*VolumeTailSenderRequest)(nil), "volume_server_pb.VolumeTailSenderRequest")
-	proto.RegisterType((*VolumeTailSenderResponse)(nil), "volume_server_pb.VolumeTailSenderResponse")
-	proto.RegisterType((*VolumeTailReceiverRequest)(nil), "volume_server_pb.VolumeTailReceiverRequest")
-	proto.RegisterType((*VolumeTailReceiverResponse)(nil), "volume_server_pb.VolumeTailReceiverResponse")
-	proto.RegisterType((*VolumeEcShardsGenerateRequest)(nil), "volume_server_pb.VolumeEcShardsGenerateRequest")
-	proto.RegisterType((*VolumeEcShardsGenerateResponse)(nil), "volume_server_pb.VolumeEcShardsGenerateResponse")
-	proto.RegisterType((*VolumeEcShardsRebuildRequest)(nil), "volume_server_pb.VolumeEcShardsRebuildRequest")
-	proto.RegisterType((*VolumeEcShardsRebuildResponse)(nil), "volume_server_pb.VolumeEcShardsRebuildResponse")
-	proto.RegisterType((*VolumeEcShardsCopyRequest)(nil), "volume_server_pb.VolumeEcShardsCopyRequest")
-	proto.RegisterType((*VolumeEcShardsCopyResponse)(nil), "volume_server_pb.VolumeEcShardsCopyResponse")
-	proto.RegisterType((*VolumeEcShardsDeleteRequest)(nil), "volume_server_pb.VolumeEcShardsDeleteRequest")
-	proto.RegisterType((*VolumeEcShardsDeleteResponse)(nil), "volume_server_pb.VolumeEcShardsDeleteResponse")
-	proto.RegisterType((*VolumeEcShardsMountRequest)(nil), "volume_server_pb.VolumeEcShardsMountRequest")
-	proto.RegisterType((*VolumeEcShardsMountResponse)(nil), "volume_server_pb.VolumeEcShardsMountResponse")
-	proto.RegisterType((*VolumeEcShardsUnmountRequest)(nil), "volume_server_pb.VolumeEcShardsUnmountRequest")
-	proto.RegisterType((*VolumeEcShardsUnmountResponse)(nil), "volume_server_pb.VolumeEcShardsUnmountResponse")
-	proto.RegisterType((*VolumeEcShardReadRequest)(nil), "volume_server_pb.VolumeEcShardReadRequest")
-	proto.RegisterType((*VolumeEcShardReadResponse)(nil), "volume_server_pb.VolumeEcShardReadResponse")
-	proto.RegisterType((*VolumeEcBlobDeleteRequest)(nil), "volume_server_pb.VolumeEcBlobDeleteRequest")
-	proto.RegisterType((*VolumeEcBlobDeleteResponse)(nil), "volume_server_pb.VolumeEcBlobDeleteResponse")
-	proto.RegisterType((*VolumeEcShardsToVolumeRequest)(nil), "volume_server_pb.VolumeEcShardsToVolumeRequest")
-	proto.RegisterType((*VolumeEcShardsToVolumeResponse)(nil), "volume_server_pb.VolumeEcShardsToVolumeResponse")
-	proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest")
-	proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse")
-	proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus")
-	proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus")
-	proto.RegisterType((*RemoteFile)(nil), "volume_server_pb.RemoteFile")
-	proto.RegisterType((*VolumeInfo)(nil), "volume_server_pb.VolumeInfo")
-	proto.RegisterType((*VolumeTierMoveDatToRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteRequest")
-	proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse")
-	proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest")
-	proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse")
-	proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest")
-	proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter")
-	proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization")
-	proto.RegisterType((*QueryRequest_InputSerialization_CSVInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.CSVInput")
-	proto.RegisterType((*QueryRequest_InputSerialization_JSONInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.JSONInput")
-	proto.RegisterType((*QueryRequest_InputSerialization_ParquetInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.ParquetInput")
-	proto.RegisterType((*QueryRequest_OutputSerialization)(nil), "volume_server_pb.QueryRequest.OutputSerialization")
-	proto.RegisterType((*QueryRequest_OutputSerialization_CSVOutput)(nil), "volume_server_pb.QueryRequest.OutputSerialization.CSVOutput")
-	proto.RegisterType((*QueryRequest_OutputSerialization_JSONOutput)(nil), "volume_server_pb.QueryRequest.OutputSerialization.JSONOutput")
-	proto.RegisterType((*QueriedStripe)(nil), "volume_server_pb.QueriedStripe")
+var File_volume_server_proto protoreflect.FileDescriptor
+
+var file_volume_server_proto_rawDesc = []byte{
+	0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+	0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68,
+	0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a,
+	0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+	0x07, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70,
+	0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43,
+	0x68, 0x65, 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c,
+	0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72,
+	0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76,
+	0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+	0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65,
+	0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+	0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12,
+	0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+	0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a,
+	0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a,
+	0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45,
+	0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f,
+	0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x12, 0x1b,
0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a, + 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61, + 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, + 0x5b, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, + 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x1d, 0x0a, 0x1b, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, + 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, + 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfb, 0x01, 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, + 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, + 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6c, 0x5f, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6c, 0x4f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, + 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, 0x42, 0x0a, 0x1d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x14, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, + 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, + 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, + 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, + 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x32, + 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x22, 0x38, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xcb, 0x01, 0x0a, + 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, + 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73, + 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f, + 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 
0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, + 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x15, 0x52, 0x65, 0x61, + 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x39, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, + 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, + 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x22, 0x87, 0x01, 0x0a, 0x16, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x19, 0x0a, + 0x17, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 
0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x84, + 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, 0x74, + 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, + 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, + 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a, + 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x12, + 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, + 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, 0x69, + 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 
0x20, 0x03, + 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x1a, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, 0x0a, + 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, + 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, + 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, 0x64, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, 0x73, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, + 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, + 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x09, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, + 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, + 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, + 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, + 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 
0x63, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, + 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, + 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a, + 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 
0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, + 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, + 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, + 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, + 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, + 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, + 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 
0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, + 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, + 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, + 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 
0x65, 0x49, + 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, 0x63, + 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, + 0x74, 0x6c, 0x32, 0xa9, 0x21, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, + 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, + 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, + 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, + 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, + 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, + 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, + 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 
0x65, 0x12, 0x28, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, + 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x77, + 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, + 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 
0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, + 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, + 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, + 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, + 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, + 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, + 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a, + 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x12, + 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 
0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, + 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, + 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, + 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, + 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 
0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, + 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, + 0x76, 0x65, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, + 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, + 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x39, + 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, + 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, + 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_volume_server_proto_rawDescOnce sync.Once + file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc +) + +func file_volume_server_proto_rawDescGZIP() []byte { + file_volume_server_proto_rawDescOnce.Do(func() { + file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData) + }) + return file_volume_server_proto_rawDescData +} + +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 90) +var file_volume_server_proto_goTypes = []interface{}{ + (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest + (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse + (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult + (*Empty)(nil), // 3: 
volume_server_pb.Empty + (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest + (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse + (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest + (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse + (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest + (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse + (*VacuumVolumeCleanupRequest)(nil), // 10: volume_server_pb.VacuumVolumeCleanupRequest + (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse + (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse + (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest + (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse + (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest + (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse + (*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest + (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse + (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest + (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse + (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest + (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse + (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest + (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse + (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest + (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse + (*VolumeMarkWritableRequest)(nil), // 28: volume_server_pb.VolumeMarkWritableRequest + (*VolumeMarkWritableResponse)(nil), // 29: volume_server_pb.VolumeMarkWritableResponse + (*VolumeConfigureRequest)(nil), // 30: volume_server_pb.VolumeConfigureRequest + (*VolumeConfigureResponse)(nil), // 31: volume_server_pb.VolumeConfigureResponse + (*VolumeStatusRequest)(nil), // 32: volume_server_pb.VolumeStatusRequest + (*VolumeStatusResponse)(nil), // 33: volume_server_pb.VolumeStatusResponse + (*VolumeCopyRequest)(nil), // 34: volume_server_pb.VolumeCopyRequest + (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse + (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest + (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse + (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest + (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse + (*WriteNeedleBlobRequest)(nil), // 40: volume_server_pb.WriteNeedleBlobRequest + (*WriteNeedleBlobResponse)(nil), // 41: volume_server_pb.WriteNeedleBlobResponse + (*VolumeTailSenderRequest)(nil), // 42: volume_server_pb.VolumeTailSenderRequest + (*VolumeTailSenderResponse)(nil), // 43: volume_server_pb.VolumeTailSenderResponse + (*VolumeTailReceiverRequest)(nil), // 44: volume_server_pb.VolumeTailReceiverRequest + (*VolumeTailReceiverResponse)(nil), // 45: volume_server_pb.VolumeTailReceiverResponse + (*VolumeEcShardsGenerateRequest)(nil), // 46: 
volume_server_pb.VolumeEcShardsGenerateRequest + (*VolumeEcShardsGenerateResponse)(nil), // 47: volume_server_pb.VolumeEcShardsGenerateResponse + (*VolumeEcShardsRebuildRequest)(nil), // 48: volume_server_pb.VolumeEcShardsRebuildRequest + (*VolumeEcShardsRebuildResponse)(nil), // 49: volume_server_pb.VolumeEcShardsRebuildResponse + (*VolumeEcShardsCopyRequest)(nil), // 50: volume_server_pb.VolumeEcShardsCopyRequest + (*VolumeEcShardsCopyResponse)(nil), // 51: volume_server_pb.VolumeEcShardsCopyResponse + (*VolumeEcShardsDeleteRequest)(nil), // 52: volume_server_pb.VolumeEcShardsDeleteRequest + (*VolumeEcShardsDeleteResponse)(nil), // 53: volume_server_pb.VolumeEcShardsDeleteResponse + (*VolumeEcShardsMountRequest)(nil), // 54: volume_server_pb.VolumeEcShardsMountRequest + (*VolumeEcShardsMountResponse)(nil), // 55: volume_server_pb.VolumeEcShardsMountResponse + (*VolumeEcShardsUnmountRequest)(nil), // 56: volume_server_pb.VolumeEcShardsUnmountRequest + (*VolumeEcShardsUnmountResponse)(nil), // 57: volume_server_pb.VolumeEcShardsUnmountResponse + (*VolumeEcShardReadRequest)(nil), // 58: volume_server_pb.VolumeEcShardReadRequest + (*VolumeEcShardReadResponse)(nil), // 59: volume_server_pb.VolumeEcShardReadResponse + (*VolumeEcBlobDeleteRequest)(nil), // 60: volume_server_pb.VolumeEcBlobDeleteRequest + (*VolumeEcBlobDeleteResponse)(nil), // 61: volume_server_pb.VolumeEcBlobDeleteResponse + (*VolumeEcShardsToVolumeRequest)(nil), // 62: volume_server_pb.VolumeEcShardsToVolumeRequest + (*VolumeEcShardsToVolumeResponse)(nil), // 63: volume_server_pb.VolumeEcShardsToVolumeResponse + (*ReadVolumeFileStatusRequest)(nil), // 64: volume_server_pb.ReadVolumeFileStatusRequest + (*ReadVolumeFileStatusResponse)(nil), // 65: volume_server_pb.ReadVolumeFileStatusResponse + (*DiskStatus)(nil), // 66: volume_server_pb.DiskStatus + (*MemStatus)(nil), // 67: volume_server_pb.MemStatus + (*RemoteFile)(nil), // 68: volume_server_pb.RemoteFile + (*VolumeInfo)(nil), // 69: volume_server_pb.VolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 70: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 71: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 72: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + (*VolumeTierMoveDatFromRemoteResponse)(nil), // 73: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 74: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 75: volume_server_pb.VolumeServerStatusResponse + (*VolumeServerLeaveRequest)(nil), // 76: volume_server_pb.VolumeServerLeaveRequest + (*VolumeServerLeaveResponse)(nil), // 77: volume_server_pb.VolumeServerLeaveResponse + (*QueryRequest)(nil), // 78: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 79: volume_server_pb.QueriedStripe + (*VolumeNeedleStatusRequest)(nil), // 80: volume_server_pb.VolumeNeedleStatusRequest + (*VolumeNeedleStatusResponse)(nil), // 81: volume_server_pb.VolumeNeedleStatusResponse + (*QueryRequest_Filter)(nil), // 82: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 83: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 84: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 85: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 86: 
volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 87: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 88: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 89: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput +} +var file_volume_server_proto_depIdxs = []int32{ + 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult + 68, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 66, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 67, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 82, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 83, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization + 84, // 6: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 85, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 86, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 87, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 88, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 89, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 0, // 12: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest + 4, // 13: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest + 6, // 14: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest + 8, // 15: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest + 10, // 16: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest + 12, // 17: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest + 14, // 18: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest + 16, // 19: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest + 18, // 20: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest + 20, // 21: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest + 22, // 22: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest + 24, // 23: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest + 26, // 24: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest + 28, // 25: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> 
volume_server_pb.VolumeMarkWritableRequest + 30, // 26: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest + 32, // 27: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest + 34, // 28: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest + 64, // 29: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 36, // 30: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest + 38, // 31: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest + 40, // 32: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest + 42, // 33: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 44, // 34: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest + 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 52, // 38: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 54, // 39: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 56, // 40: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 58, // 41: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 60, // 42: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 62, // 43: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 70, // 44: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 72, // 45: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 74, // 46: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 76, // 47: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest + 78, // 48: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 80, // 49: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 1, // 50: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 51: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 52: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 53: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 54: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 55: volume_server_pb.VolumeServer.DeleteCollection:output_type -> 
volume_server_pb.DeleteCollectionResponse + 15, // 56: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 57: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 58: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 59: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 60: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 61: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 62: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 63: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 31, // 64: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 33, // 65: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 35, // 66: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 65, // 67: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 37, // 68: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 39, // 69: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 41, // 70: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 43, // 71: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 45, // 72: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 47, // 73: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 49, // 74: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 51, // 75: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 53, // 76: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 55, // 77: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 57, // 78: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 59, // 79: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 61, // 80: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 63, // 81: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 71, // 82: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse + 73, // 83: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 75, // 84: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 77, // 85: 
volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse + 79, // 86: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 81, // 87: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse + 50, // [50:88] is the sub-list for method output_type + 12, // [12:50] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_volume_server_proto_init() } +func file_volume_server_proto_init() { + if File_volume_server_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*VacuumVolumeCleanupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCleanupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountResponse); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkWritableRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkWritableResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } 
+ } + file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadNeedleBlobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadNeedleBlobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteNeedleBlobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteNeedleBlobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteFile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[76].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*VolumeServerLeaveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerLeaveResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueriedStripe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeNeedleStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeNeedleStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[89].Exporter = 
func(v interface{}, i int) interface{} {
+			switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_volume_server_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   90,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_volume_server_proto_goTypes,
+		DependencyIndexes: file_volume_server_proto_depIdxs,
+		MessageInfos:      file_volume_server_proto_msgTypes,
+	}.Build()
+	File_volume_server_proto = out.File
+	file_volume_server_proto_rawDesc = nil
+	file_volume_server_proto_goTypes = nil
+	file_volume_server_proto_depIdxs = nil
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for VolumeServer service
+const _ = grpc.SupportPackageIsVersion6
+// VolumeServerClient is the client API for VolumeServer service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VolumeServerClient interface {
-	// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+	//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
	BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
	VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
	VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error)
@@ -2122,10 +7270,15 @@ type VolumeServerClient interface {
	VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error)
	VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error)
	VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error)
+	VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error)
+	VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error)
+	VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error)
	// copy the .idx .dat files, and mount this volume
	VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error)
	ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error)
	CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error)
+	ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error)
+	WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error)
	VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error)
	VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error)
	// erasure coding
@@ -2141,21 +7294,24 @@ type VolumeServerClient interface {
	// tiered storage
	VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error)
	VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error)
-	// query
+	VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error)
+	VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error)
+	// <experimental> query
	Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error)
+	VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error)
}

type volumeServerClient struct {
-	cc *grpc.ClientConn
+	cc grpc.ClientConnInterface
}

-func NewVolumeServerClient(cc *grpc.ClientConn) VolumeServerClient {
+func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient {
	return &volumeServerClient{cc}
}

func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) {
	out := new(BatchDeleteResponse)
-	err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -2164,7 +7320,7 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq
func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) {
	out := new(VacuumVolumeCheckResponse)
-	err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -2173,7 +7329,7 @@ func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVo
func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) {
	out := new(VacuumVolumeCompactResponse)
-	err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -2182,7 +7338,7 @@ func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *Vacuum
func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) {
	out := new(VacuumVolumeCommitResponse)
-	err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...)
if err != nil { return nil, err } @@ -2191,7 +7347,7 @@ func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumV func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { out := new(VacuumVolumeCleanupResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...) if err != nil { return nil, err } @@ -2200,7 +7356,7 @@ func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *Vacuum func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { out := new(DeleteCollectionResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -2209,7 +7365,7 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { out := new(AllocateVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...) if err != nil { return nil, err } @@ -2218,7 +7374,7 @@ func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVol func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { out := new(VolumeSyncStatusResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...) if err != nil { return nil, err } @@ -2226,7 +7382,7 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn } func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) if err != nil { return nil, err } @@ -2259,7 +7415,7 @@ func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopy func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { out := new(VolumeMountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...) 
if err != nil { return nil, err } @@ -2268,7 +7424,7 @@ func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountReq func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { out := new(VolumeUnmountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...) if err != nil { return nil, err } @@ -2277,7 +7433,7 @@ func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmoun func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { out := new(VolumeDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...) if err != nil { return nil, err } @@ -2286,7 +7442,34 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { out := new(VolumeMarkReadonlyResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) { + out := new(VolumeMarkWritableResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkWritable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { + out := new(VolumeConfigureResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) { + out := new(VolumeStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeStatus", in, out, opts...) if err != nil { return nil, err } @@ -2295,7 +7478,7 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { out := new(VolumeCopyResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, opts...) if err != nil { return nil, err } @@ -2304,7 +7487,7 @@ func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyReque func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { out := new(ReadVolumeFileStatusResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, c.cc, opts...) 
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...) if err != nil { return nil, err } @@ -2312,7 +7495,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } @@ -2343,8 +7526,26 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { return m, nil } +func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) { + out := new(ReadNeedleBlobResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) { + out := new(WriteNeedleBlobResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) if err != nil { return nil, err } @@ -2377,7 +7578,7 @@ func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { out := new(VolumeTailReceiverResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...) if err != nil { return nil, err } @@ -2386,7 +7587,7 @@ func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeT func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { out := new(VolumeEcShardsGenerateResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...) if err != nil { return nil, err } @@ -2395,7 +7596,7 @@ func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *Vol func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { out := new(VolumeEcShardsRebuildResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, c.cc, opts...) 
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...) if err != nil { return nil, err } @@ -2404,7 +7605,7 @@ func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *Volu func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { out := new(VolumeEcShardsCopyResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...) if err != nil { return nil, err } @@ -2413,7 +7614,7 @@ func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeE func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { out := new(VolumeEcShardsDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...) if err != nil { return nil, err } @@ -2422,7 +7623,7 @@ func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *Volum func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { out := new(VolumeEcShardsMountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...) if err != nil { return nil, err } @@ -2431,7 +7632,7 @@ func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *Volume func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { out := new(VolumeEcShardsUnmountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...) if err != nil { return nil, err } @@ -2439,7 +7640,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu } func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) if err != nil { return nil, err } @@ -2472,7 +7673,7 @@ func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { out := new(VolumeEcBlobDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...) 
if err != nil { return nil, err } @@ -2481,7 +7682,7 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { out := new(VolumeEcShardsToVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...) if err != nil { return nil, err } @@ -2489,7 +7690,7 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol } func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) if err != nil { return nil, err } @@ -2521,7 +7722,7 @@ func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDat } func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) if err != nil { return nil, err } @@ -2552,8 +7753,26 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveD return m, nil } +func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { + out := new(VolumeServerStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) { + out := new(VolumeServerLeaveResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerLeave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/Query", opts...) 
	if err != nil {
		return nil, err
	}
@@ -2584,10 +7803,18 @@ func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) {
	return m, nil
}

-// Server API for VolumeServer service
+func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) {
+	out := new(VolumeNeedleStatusResponse)
+	err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeNeedleStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+// VolumeServerServer is the server API for VolumeServer service.
type VolumeServerServer interface {
-	// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+	//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
	BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
	VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
	VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error)
@@ -2601,10 +7828,15 @@ type VolumeServerServer interface {
	VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error)
	VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error)
	VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error)
+	VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error)
+	VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error)
+	VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error)
	// copy the .idx .dat files, and mount this volume
	VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error)
	ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error)
	CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error
+	ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error)
+	WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error)
	VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error
	VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error)
	// erasure coding
@@ -2620,8 +7852,130 @@ type VolumeServerServer interface {
	// tiered storage
	VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error
	VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error
-	// query
+	VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error)
+	VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error)
+	// <experimental> query
	Query(*QueryRequest, VolumeServer_QueryServer) error
+	VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error)
+}
+
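(Aside: with the regenerated client surface above and the matching server interface now both visible, a minimal client sketch may help. Everything here is illustrative, not part of the diff: the address, volume id, and the VolumeId/IsReadOnly/Records field names are assumptions about the generated message types. The one point the diff itself guarantees is that NewVolumeServerClient now accepts any grpc.ClientConnInterface, which *grpc.ClientConn satisfies.)

	package main

	import (
		"context"
		"io"
		"log"
		"time"

		"google.golang.org/grpc"

		"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	)

	func main() {
		// Dial a volume server; the address is a placeholder.
		conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
		if err != nil {
			log.Fatalf("dial: %v", err)
		}
		defer conn.Close()

		// *grpc.ClientConn implements grpc.ClientConnInterface, so it can be
		// handed to the regenerated constructor directly.
		client := volume_server_pb.NewVolumeServerClient(conn)

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// One of the unary RPCs added in this change (field names assumed).
		st, err := client.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: 1})
		if err != nil {
			log.Fatalf("VolumeStatus: %v", err)
		}
		log.Printf("volume 1 read-only: %v", st.IsReadOnly)

		// Draining the server-streaming Query RPC: Recv until io.EOF.
		stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{})
		if err != nil {
			log.Fatalf("Query: %v", err)
		}
		for {
			stripe, err := stream.Recv()
			if err == io.EOF {
				break // server finished sending QueriedStripe messages
			}
			if err != nil {
				log.Fatalf("recv: %v", err)
			}
			log.Printf("stripe: %d bytes", len(stripe.Records))
		}
	}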
+// UnimplementedVolumeServerServer can be embedded to have forward compatible implementations.
+type UnimplementedVolumeServerServer struct {
+}
+
+func (*UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCommit not implemented")
+}
+func (*UnimplementedVolumeServerServer) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCleanup not implemented")
+}
+func (*UnimplementedVolumeServerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented")
+}
+func (*UnimplementedVolumeServerServer) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AllocateVolume not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error {
+	return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeMount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeUnmount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkWritable not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeConfigure not implemented")
+}
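(Aside: the point of UnimplementedVolumeServerServer is that a server implementation can embed it and override only the RPCs it cares about; every method it does not override answers with codes.Unimplemented instead of breaking the build when the service grows, which matters for a service whose method list keeps expanding. The depIdxs sub-lists above put the count at 38 methods. A minimal sketch, reusing the imports from the earlier client sketch; demoVolumeServer is a hypothetical name:)

	type demoVolumeServer struct {
		// Embedding picks up stub implementations of all 38 service methods.
		volume_server_pb.UnimplementedVolumeServerServer
	}

	// Override just one method; the rest keep returning codes.Unimplemented.
	func (s *demoVolumeServer) VolumeStatus(ctx context.Context, req *volume_server_pb.VolumeStatusRequest) (*volume_server_pb.VolumeStatusResponse, error) {
		return &volume_server_pb.VolumeStatusResponse{}, nil
	}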
+func (*UnimplementedVolumeServerServer) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented")
+}
+func (*UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error {
+	return status.Errorf(codes.Unimplemented, "method CopyFile not implemented")
+}
+func (*UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented")
+}
+func (*UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error {
+	return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeTailReceiver not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsGenerate not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsRebuild not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsCopy not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsDelete not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsMount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented")
+}
+func (*UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error {
+	return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented")
+}
VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcBlobDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeServerStatus not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeServerLeave not implemented") +} +func (*UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error { + return status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeNeedleStatus not implemented") } func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { @@ -2865,6 +8219,60 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeMarkWritable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeMarkWritableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeMarkWritable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkWritable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeConfigureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeConfigure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeStatus_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeStatus(ctx, req.(*VolumeStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeCopyRequest) if err := dec(in); err != nil { @@ -2922,6 +8330,42 @@ func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { return x.ServerStream.SendMsg(m) } +func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadNeedleBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).ReadNeedleBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteNeedleBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).WriteNeedleBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTailSenderRequest) if err := stream.RecvMsg(m); err != nil { @@ -3168,6 +8612,42 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDa return x.ServerStream.SendMsg(m) } +func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeServerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeServerLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(VolumeServerLeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeServerLeave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerLeave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeServerLeave(ctx, req.(*VolumeServerLeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(QueryRequest) if err := stream.RecvMsg(m); err != nil { @@ -3189,6 +8669,24 @@ func (x *volumeServerQueryServer) Send(m *QueriedStripe) error { return x.ServerStream.SendMsg(m) } +func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeNeedleStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeNeedleStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, req.(*VolumeNeedleStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), @@ -3242,6 +8740,18 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_VolumeMarkReadonly_Handler, }, { + MethodName: "VolumeMarkWritable", + Handler: _VolumeServer_VolumeMarkWritable_Handler, + }, + { + MethodName: "VolumeConfigure", + Handler: _VolumeServer_VolumeConfigure_Handler, + }, + { + MethodName: "VolumeStatus", + Handler: _VolumeServer_VolumeStatus_Handler, + }, + { MethodName: "VolumeCopy", Handler: _VolumeServer_VolumeCopy_Handler, }, @@ -3250,6 +8760,14 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_ReadVolumeFileStatus_Handler, }, { + MethodName: "ReadNeedleBlob", + Handler: _VolumeServer_ReadNeedleBlob_Handler, + }, + { + MethodName: "WriteNeedleBlob", + Handler: _VolumeServer_WriteNeedleBlob_Handler, + }, + { MethodName: "VolumeTailReceiver", Handler: _VolumeServer_VolumeTailReceiver_Handler, }, @@ -3285,6 +8803,18 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeEcShardsToVolume", Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, }, + { + MethodName: "VolumeServerStatus", + Handler: _VolumeServer_VolumeServerStatus_Handler, + }, + { + MethodName: "VolumeServerLeave", + Handler: _VolumeServer_VolumeServerLeave_Handler, + }, + { + MethodName: "VolumeNeedleStatus", + Handler: _VolumeServer_VolumeNeedleStatus_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3325,191 +8855,3 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ }, Metadata: "volume_server.proto", } - -func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2905 bytes of a gzipped FileDescriptorProto (raw hex byte literal elided; this commit removes the descriptor) -}
diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go new file mode 100644 index 000000000..519a9a201 --- /dev/null +++ b/weed/replication/repl_util/replication_util.go @@ -0,0 +1,42 @@ +package repl_util + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error { + + for _, chunk := range chunkViews { + + fileUrls, err := filerSource.LookupFileId(chunk.FileId) + if err != nil { + return err + } + + var writeErr error + var shouldRetry bool + + for _, fileUrl := range fileUrls { + shouldRetry, err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { + writeErr = writeFunc(data) + }) + if err != nil { + glog.V(1).Infof("read from %s: %v", fileUrl, err) + } else if writeErr != nil { + glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr) + } else { + break + } + } + if shouldRetry && err != nil { + return err + } + if writeErr != nil { + return writeErr + } + } + return nil +}
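The new CopyFromChunkViews helper centralizes the read-retry loop that each cloud sink used to hand-roll: it resolves every replica URL for a chunk, streams from the first one that responds, and gives up only when a retryable read failed on all URLs or the sink's own writeFunc errored. A usage sketch under the same assumptions; copyEntryToLocalFile and its local-file destination are illustrative, not code from the repository:

package example

import (
	"os"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/replication/repl_util"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
)

// copyEntryToLocalFile drains every chunk of a replicated entry into one
// local file, the same way the cloud sinks below feed their writers.
func copyEntryToLocalFile(filerSource *source.FilerSource, entry *filer_pb.Entry, path string) error {
	dst, err := os.Create(path)
	if err != nil {
		return err
	}
	defer dst.Close()

	totalSize := filer.FileSize(entry)
	chunkViews := filer.ViewFromChunks(filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))

	// The writeFunc is the only sink-specific part; everything else is shared.
	writeFunc := func(data []byte) error {
		_, writeErr := dst.Write(data)
		return writeErr
	}
	return repl_util.CopyFromChunkViews(chunkViews, filerSource, writeFunc)
}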
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 7353cdc91..d7e609c68 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -3,8 +3,10 @@ package replication import ( "context" "fmt" - "path/filepath" + "github.com/chrislusf/seaweedfs/weed/pb" + "google.golang.org/grpc" "strings" + "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -18,10 +20,10 @@ type Replicator struct { source *source.FilerSource } -func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator { +func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator { source := &source.FilerSource{} - source.Initialize(sourceConfig) + source.Initialize(sourceConfig, configPrefix) dataSink.SetSourceFiler(source) @@ -32,37 +34,64 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin } func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error { + if message.IsFromOtherCluster && r.sink.GetName() == "filer" { + return nil + } if !strings.HasPrefix(key, r.source.Dir) { glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } - newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])) + var dateKey string + if r.sink.IsIncremental() { + var mTime int64 + if message.NewEntry != nil { + mTime = message.NewEntry.Attributes.Mtime + } else if message.OldEntry != nil { + mTime = message.OldEntry.Attributes.Mtime + } + dateKey = time.Unix(mTime, 0).Format("2006-01-02") + } + newKey := util.Join(r.sink.GetSinkToDirectory(), dateKey, key[len(r.source.Dir):]) glog.V(3).Infof("replicate %s => %s", key, newKey) key = newKey if message.OldEntry != nil && message.NewEntry == nil { glog.V(4).Infof("deleting %v", key) - return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks) + return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) } if message.OldEntry == nil && message.NewEntry != nil { glog.V(4).Infof("creating %v", key) - return r.sink.CreateEntry(ctx, key, message.NewEntry) + return r.sink.CreateEntry(key, message.NewEntry, message.Signatures) } if message.OldEntry == nil && message.NewEntry == nil { glog.V(0).Infof("weird message %+v", message) return nil } - foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks) + foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures) if foundExisting { glog.V(4).Infof("updated %v", key) return err } - err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false) + err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false, message.Signatures) if err != nil { return fmt.Errorf("delete old entry %v: %v", key, err) } glog.V(4).Infof("creating missing %v", key) - return r.sink.CreateEntry(ctx, key, message.NewEntry) + return r.sink.CreateEntry(key, message.NewEntry, message.Signatures) +} + +func ReadFilerSignature(grpcDialOption grpc.DialOption, filer string) (filerSignature int32, readErr error) { + if readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}); err != nil { + return fmt.Errorf("GetFilerConfiguration %s: %v", filer, err) + } else { + filerSignature = resp.Signature + } + return nil + }); readErr != nil { + return 0, readErr + } + return filerSignature, nil }
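For incremental sinks, Replicate above rewrites each key under a per-day folder derived from the entry's mtime, so the sink directory becomes a date-partitioned changelog rather than a mirror. A small sketch of that key computation; incrementalKey is an illustrative wrapper, while util.Join is the helper the replicator itself calls:

package example

import (
	"time"

	"github.com/chrislusf/seaweedfs/weed/util"
)

// incrementalKey places a replicated entry under sinkDir/YYYY-MM-DD/...,
// deriving the date from the entry's modification time as Replicate does.
func incrementalKey(sinkDir, sourceRelativeKey string, mtime int64) string {
	dateKey := time.Unix(mtime, 0).Format("2006-01-02")
	return util.Join(sinkDir, dateKey, sourceRelativeKey)
}

// e.g. incrementalKey("/backup", "/notes/a.txt", 1619427575)
// yields "/backup/2021-04-26/notes/a.txt" (the exact date depends on the local timezone).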
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 6381908a1..d13a1049b 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -4,11 +4,12 @@ import ( "bytes" "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "net/url" "strings" "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" @@ -17,10 +18,11 @@ import ( ) type AzureSink struct { - containerURL azblob.ContainerURL - container string - dir string - filerSource *source.FilerSource + containerURL azblob.ContainerURL + container string + dir string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -35,12 +37,17 @@ func (g *AzureSink) GetSinkToDirectory() string { return g.dir } -func (g *AzureSink) Initialize(configuration util.Configuration) error { +func (g *AzureSink) IsIncremental() bool { + return g.isIncremental +} + +func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error { + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( - configuration.GetString("account_name"), - configuration.GetString("account_key"), - configuration.GetString("container"), - configuration.GetString("directory"), + configuration.GetString(prefix+"account_name"), + configuration.GetString(prefix+"account_key"), + configuration.GetString(prefix+"container"), + configuration.GetString(prefix+"directory"), ) } @@ -70,7 +77,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e return nil } -func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { key = cleanKey(key) @@ -78,7 +85,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de key = key + "/" } - if _, err := g.containerURL.NewBlobURL(key).Delete(ctx, + if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) } @@ -87,7 +94,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de } -func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) @@ -95,44 +102,32 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb return nil } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) // Create a URL that references a to-be-created blob in your // Azure Storage account's container. appendBlobURL := g.containerURL.NewAppendBlobURL(key) - _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) + _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) if err != nil { return err } - for _, chunk := range chunkViews { - - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) - if err != nil { - return err - } - - var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) - }) - - if readErr != nil { - return readErr - } - if writeErr != nil { - return writeErr - } + writeFunc := func(data []byte) error { + _, writeErr := appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) + return writeErr + } + if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { + return err } return nil } -func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 35c2230fa..90a0bb2e8 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -2,9 +2,10 @@ package B2Sink import ( "context" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" @@ -13,10 +14,11 @@ import ( ) type B2Sink struct { - client *b2.Client - bucket string - dir string - filerSource *source.FilerSource + client *b2.Client + bucket string + dir string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -31,12 +33,17 @@ func (g *B2Sink) GetSinkToDirectory() string { return g.dir } -func (g *B2Sink) 
Initialize(configuration util.Configuration) error { +func (g *B2Sink) IsIncremental() bool { + return g.isIncremental +} + +func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( - configuration.GetString("b2_account_id"), - configuration.GetString("b2_master_application_key"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"b2_account_id"), + configuration.GetString(prefix+"b2_master_application_key"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -45,8 +52,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) { } func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { - ctx := context.Background() - client, err := b2.NewClient(ctx, accountId, accountKey) + client, err := b2.NewClient(context.Background(), accountId, accountKey) if err != nil { return err } @@ -58,7 +64,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { return nil } -func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { key = cleanKey(key) @@ -66,18 +72,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet key = key + "/" } - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - return targetObject.Delete(ctx) + return targetObject.Delete(context.Background()) } -func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) @@ -85,46 +91,33 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En return nil } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - writer := targetObject.NewWriter(ctx) - - for _, chunk := range chunkViews { - - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) - if err != nil { - return err - } - - var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, err := writer.Write(data) - if err != nil { - writeErr = err - } - }) - - if readErr != nil { - return readErr - } - if writeErr != nil { - return writeErr - } + writer := targetObject.NewWriter(context.Background()) + writeFunc := func(data []byte) error { + _, writeErr := writer.Write(data) + return writeErr } - return writer.Close() + defer writer.Close() + + if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { + return err + } + + return nil } -func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(key 
string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/README.txt b/weed/replication/sink/filersink/README.txt new file mode 100644 index 000000000..4ba0fc752 --- /dev/null +++ b/weed/replication/sink/filersink/README.txt @@ -0,0 +1,12 @@
+How replication works
+======
+
+All metadata changes within the current cluster are notified to a message queue.
+
+If a metadata change came from another cluster, it is not notified to the message queue again.
+
+So active<=>active replication is possible.
+
+
+All metadata changes are published as metadata change events.
+So all mounts listening for metadata changes will get updated.
\ No newline at end of file
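The rule the README describes is enforced at the top of Replicator.Replicate: an event tagged as coming from another cluster is dropped before it can be replayed into a filer sink, which is what breaks the notification loop in an active<=>active setup. A minimal sketch of that guard; shouldReplicate is an illustrative wrapper, not a function in the codebase:

package example

import "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

// shouldReplicate mirrors the loop-breaking check in Replicator.Replicate:
// an event that a remote cluster already produced must not be echoed back
// into another filer, or two clusters would replicate to each other forever.
func shouldReplicate(message *filer_pb.EventNotification, sinkName string) bool {
	if message.IsFromOtherCluster && sinkName == "filer" {
		return false // already replicated once; dropping it breaks the cycle
	}
	return true
}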
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 97e9671a3..a7392d856 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,41 +3,46 @@ package filersink import ( "context" "fmt" - "google.golang.org/grpc" - "strings" + "github.com/chrislusf/seaweedfs/weed/util" "sync" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } + + replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) + var wg sync.WaitGroup - for _, sourceChunk := range sourceChunks { + for chunkIndex, sourceChunk := range sourceChunks { wg.Add(1) - go func(chunk *filer_pb.FileChunk) { + go func(chunk *filer_pb.FileChunk, index int) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(ctx, chunk) + replicatedChunk, e := fs.replicateOneChunk(chunk, path) if e != nil { err = e + return } - replicatedChunks = append(replicatedChunks, replicatedChunk) - }(sourceChunk) + replicatedChunks[index] = replicatedChunk + }(sourceChunk, chunkIndex) } wg.Wait() return } -func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path string) (*filer_pb.FileChunk, error) { - fileId, err := fs.fetchAndWrite(ctx, sourceChunk) + fileId, err := fs.fetchAndWrite(sourceChunk, path) if err != nil { return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err) } @@ -49,21 +54,23 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p Mtime: sourceChunk.Mtime, ETag: sourceChunk.ETag, SourceFileId: sourceChunk.GetFileIdString(), + CipherKey: sourceChunk.CipherKey, + IsCompressed: sourceChunk.IsCompressed, }, nil } -func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) (fileId string, err error) { - filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString()) + filename, header, resp, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString()) if err != nil { return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err) } - defer readCloser.Close() + defer util.CloseResponse(resp) var host string var auth security.EncodedJwt - if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -71,13 +78,18 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi Collection: fs.collection, TtlSec: fs.ttlSec, DataCenter: fs.dataCenter, + DiskType: fs.diskType, + Path: path, } - resp, err := client.AssignVolume(ctx, request) + resp, err := 
client.AssignVolume(context.Background(), request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) @@ -87,13 +99,16 @@ } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + if fs.writeChunkByFiler { + fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId) + } glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) - uploadResult, err := operation.Upload(fileUrl, filename, readCloser, - "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) + // fetch data as is, regardless whether it is encrypted or not + uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) if err != nil { - glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) + glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err) return "", fmt.Errorf("upload data: %v", err) } if uploadResult.Error != "" { @@ -104,19 +119,16 @@ return } -func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&FilerSink{}) - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { +func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) } - -func volumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId +func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string { + return location.Url }
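replicateChunks above switches from appending results inside goroutines to writing into a slice pre-sized to len(sourceChunks): each worker stores its result at its own index, so concurrent replication can no longer reorder chunks, and a failed chunk leaves a nil slot rather than a shorter list. A generic sketch of that fan-out pattern; replicateAll and its string payloads are illustrative, not repository code:

package example

import "sync"

// replicateAll fans work out to one goroutine per item but keeps results in
// input order by writing each result to its own index, never appending.
func replicateAll(items []string, replicateOne func(string) (string, error)) ([]string, error) {
	results := make([]string, len(items))
	var wg sync.WaitGroup
	var mu sync.Mutex
	var firstErr error
	for i, item := range items {
		wg.Add(1)
		go func(index int, it string) {
			defer wg.Done()
			r, err := replicateOne(it)
			if err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
				return
			}
			results[index] = r // distinct index per goroutine: no lock needed
		}(i, item)
	}
	wg.Wait()
	return results, firstErr
}

Unlike the production code, the sketch guards the shared error variable with a mutex; the per-goroutine writes to distinct slice elements are safe without one.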
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index f99c7fdf6..d7c5fccc3 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,11 +3,14 @@ package filersink import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/security" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" @@ -16,14 +19,18 @@ import ( ) type FilerSink struct { - filerSource *source.FilerSource - grpcAddress string - dir string - replication string - collection string - ttlSec int32 - dataCenter string - grpcDialOption grpc.DialOption + filerSource *source.FilerSource + grpcAddress string + dir string + replication string + collection string + ttlSec int32 + diskType string + dataCenter string + grpcDialOption grpc.DialOption + address string + writeChunkByFiler bool + isIncremental bool } func init() { @@ -38,58 +45,63 @@ func (fs *FilerSink) GetSinkToDirectory() string { return fs.dir } -func (fs *FilerSink) Initialize(configuration util.Configuration) error { - return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), - configuration.GetString("replication"), - configuration.GetString("collection"), - configuration.GetInt("ttlSec"), - ) +func (fs *FilerSink) IsIncremental() bool { + return fs.isIncremental +} + +func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { + fs.isIncremental = configuration.GetBool(prefix + "is_incremental") + return fs.DoInitialize( + "", + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"replication"), + configuration.GetString(prefix+"collection"), + configuration.GetInt(prefix+"ttlSec"), + configuration.GetString(prefix+"disk"), + security.LoadClientTLS(util.GetViper(), "grpc.client"), + false) } func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) { fs.filerSource = s } -func (fs *FilerSink) initialize(grpcAddress string, dir string, - replication string, collection string, ttlSec int) (err error) { +func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string, + replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) { + fs.address = address + if fs.address == "" { + fs.address = pb.GrpcAddressToServerAddress(grpcAddress) + } fs.grpcAddress = grpcAddress fs.dir = dir fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.diskType = diskType + fs.grpcDialOption = grpcDialOption + fs.writeChunkByFiler = writeChunkByFiler return nil } -func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - dir, name := filer2.FullPath(key).DirAndName() - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir, - Name: name, - IsDeleteData: deleteIncludeChunks, - } +func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { - glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(0).Infof("delete entry %s: %v", key, err) - return fmt.Errorf("delete entry %s: %v", key, err) - } + dir, name := util.FullPath(key).DirAndName() - return nil - }) + glog.V(4).Infof("delete entry: %v", key) + err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures) + if err != nil { + glog.V(0).Infof("delete entry %s: %v", key, err) + return fmt.Errorf("delete entry %s: %v", key, err) + } + return nil } -func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // look up existing entry lookupRequest := &filer_pb.LookupDirectoryEntryRequest{ @@ -97,21 +109,21 @@ Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if 
resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil { - if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { - glog.V(0).Infof("already replicated %s", key) + if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { + if filer.ETag(resp.Entry) == filer.ETag(entry) { + glog.V(3).Infof("already replicated %s", key) return nil } } - replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks) + replicatedChunks, err := fs.replicateChunks(entry.Chunks, key) if err != nil { - glog.V(0).Infof("replicate entry chunks %s: %v", key, err) - return fmt.Errorf("replicate entry chunks %s: %v", key, err) + // only warning here since the source chunk may have been deleted already + glog.Warningf("replicate entry chunks %s: %v", key, err) } - glog.V(0).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks) + glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks) request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -120,11 +132,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p IsDirectory: entry.IsDirectory, Attributes: entry.Attributes, Chunks: replicatedChunks, + Content: entry.Content, }, + IsFromOtherCluster: true, + Signatures: signatures, } - glog.V(1).Infof("create: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + glog.V(3).Infof("create: %v", request) + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -133,13 +148,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p }) } -func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -147,7 +162,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } glog.V(4).Infof("lookup entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err @@ -162,28 +177,31 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file return false, fmt.Errorf("lookup %s: %v", key, err) } - glog.V(0).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry) + glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry) if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime { // skip if already changed // this usually happens when the messages are not ordered - glog.V(0).Infof("late updates %s", key) - } else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) { + glog.V(2).Infof("late updates %s", key) + } else if filer.ETag(newEntry) == 
filer.ETag(existingEntry) { // skip if no change // this usually happens when retrying the replication - glog.V(0).Infof("already replicated %s", key) + glog.V(3).Infof("already replicated %s", key) } else { // find out what changed - deletedChunks, newChunks := compareChunks(oldEntry, newEntry) + deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry) + if err != nil { + return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err) + } // delete the chunks that are deleted from the source if deleteIncludeChunks { // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks - existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks) + existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks) } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(ctx, newChunks) + replicatedChunks, err := fs.replicateChunks(newChunks, key) if err != nil { return true, fmt.Errorf("replicate %s chunks error: %v", key, err) } @@ -191,14 +209,16 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // save updated meta data - return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ - Directory: newParentPath, - Entry: existingEntry, + Directory: newParentPath, + Entry: existingEntry, + IsFromOtherCluster: true, + Signatures: signatures, } - if _, err := client.UpdateEntry(ctx, request); err != nil { + if _, err := client.UpdateEntry(context.Background(), request); err != nil { return fmt.Errorf("update existingEntry %s: %v", key, err) } @@ -206,8 +226,21 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file }) } -func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) { - deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks) - newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks) +func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) { + aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks) + if aErr != nil { + return nil, nil, aErr + } + bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks) + if bErr != nil { + return nil, nil, bErr + } + + deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...) + deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...) + + newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...) + newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
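The rewritten compareChunks first resolves chunk manifests on both sides via filer.ResolveChunkManifest, then takes set differences in both directions, separately for data chunks and manifest chunks. A minimal sketch of that set difference, using a hypothetical minusChunks helper keyed on FileId only for illustration (the real filer.DoMinusChunks may match on more fields):

```go
package main

import "fmt"

// FileChunk is a pared-down stand-in for filer_pb.FileChunk.
type FileChunk struct {
	FileId string
}

// minusChunks returns the chunks present in as but absent from bs,
// matched here by FileId only (illustrative).
func minusChunks(as, bs []*FileChunk) (delta []*FileChunk) {
	seen := make(map[string]bool, len(bs))
	for _, b := range bs {
		seen[b.FileId] = true
	}
	for _, a := range as {
		if !seen[a.FileId] {
			delta = append(delta, a)
		}
	}
	return
}

func main() {
	oldChunks := []*FileChunk{{FileId: "3,01"}, {FileId: "3,02"}}
	newChunks := []*FileChunk{{FileId: "3,02"}, {FileId: "3,03"}}
	fmt.Printf("deleted: %s\n", minusChunks(oldChunks, newChunks)[0].FileId) // 3,01
	fmt.Printf("new:     %s\n", minusChunks(newChunks, oldChunks)[0].FileId) // 3,03
}
```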
+ return } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index abd7c49b9..5cf5b7317 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -3,23 +3,26 @@ package gcssink import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "os" "cloud.google.com/go/storage" - "github.com/chrislusf/seaweedfs/weed/filer2" + "google.golang.org/api/option" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/api/option" ) type GcsSink struct { - client *storage.Client - bucket string - dir string - filerSource *source.FilerSource + client *storage.Client + bucket string + dir string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -34,11 +37,16 @@ func (g *GcsSink) GetSinkToDirectory() string { return g.dir } -func (g *GcsSink) Initialize(configuration util.Configuration) error { +func (g *GcsSink) IsIncremental() bool { + return g.isIncremental +} + +func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -50,7 +58,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str g.bucket = bucketName g.dir = dir - ctx := context.Background() // Creates a client. 
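// If google_application_credentials is left empty in replication.toml, the lookup
// below falls back to the GOOGLE_APPLICATION_CREDENTIALS environment variable,
// and initialization aborts via glog.Fatalf when neither is set.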
if google_application_credentials == "" { var found bool @@ -59,7 +66,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") } } - client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials)) + client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials)) if err != nil { glog.Fatalf("Failed to create client: %v", err) } @@ -69,13 +76,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str return nil } -func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { if isDirectory { key = key + "/" } - if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil { + if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil { return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err) } @@ -83,35 +90,24 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele } -func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { if entry.IsDirectory { return nil } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) - for _, chunk := range chunkViews { - - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) - if err != nil { - return err - } - - _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - wc.Write(data) - }) - - if err != nil { - return err - } + wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) + defer wc.Close() + writeFunc := func(data []byte) error { + _, writeErr := wc.Write(data) + return writeErr } - if err := wc.Close(); err != nil { + if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { return err } @@ -119,7 +115,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E } -func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/localsink/local_incremental_sink.go b/weed/replication/sink/localsink/local_incremental_sink.go new file mode 100644 index 000000000..a1d49e28a --- /dev/null +++ b/weed/replication/sink/localsink/local_incremental_sink.go @@ -0,0 +1,17 @@ +package localsink + +import ( + "github.com/chrislusf/seaweedfs/weed/replication/sink" +) + +type LocalIncSink struct { + LocalSink +} + +func (localincsink *LocalIncSink) GetName() string { + return "local_incremental" +} + +func init() { + sink.Sinks = append(sink.Sinks, 
&LocalIncSink{}) +} diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go new file mode 100644 index 000000000..2b9b3e69a --- /dev/null +++ b/weed/replication/sink/localsink/local_sink.go @@ -0,0 +1,105 @@ +package localsink + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +type LocalSink struct { + Dir string + filerSource *source.FilerSource +} + +func init() { + sink.Sinks = append(sink.Sinks, &LocalSink{}) +} + +func (localsink *LocalSink) SetSourceFiler(s *source.FilerSource) { + localsink.filerSource = s +} + +func (localsink *LocalSink) GetName() string { + return "local" +} + +func (localsink *LocalSink) isMultiPartEntry(key string) bool { + return strings.HasSuffix(key, ".part") && strings.Contains(key, "/.uploads/") +} + +func (localsink *LocalSink) initialize(dir string) error { + localsink.Dir = dir + return nil +} + +func (localsink *LocalSink) Initialize(configuration util.Configuration, prefix string) error { + dir := configuration.GetString(prefix + "directory") + glog.V(4).Infof("sink.local.directory: %v", dir) + return localsink.initialize(dir) +} + +func (localsink *LocalSink) GetSinkToDirectory() string { + return localsink.Dir +} + +func (localsink *LocalSink) IsIncremental() bool { + return true +} + +func (localsink *LocalSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { + if localsink.isMultiPartEntry(key) { + return nil + } + glog.V(4).Infof("Delete Entry key: %s", key) + if err := os.Remove(key); err != nil { + glog.V(0).Infof("remove entry key %s: %s", key, err) + } + return nil +} + +func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { + if entry.IsDirectory || localsink.isMultiPartEntry(key) { + return nil + } + glog.V(4).Infof("Create Entry key: %s", key) + + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) + + dir := filepath.Dir(key) + + if _, err := os.Stat(dir); os.IsNotExist(err) { + glog.V(4).Infof("Create Directory key: %s", dir) + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + + writeFunc := func(data []byte) error { + writeErr := ioutil.WriteFile(key, data, 0755) + return writeErr + } + + if err := repl_util.CopyFromChunkViews(chunkViews, localsink.filerSource, writeFunc); err != nil { + return err + } + + return nil +} + +func (localsink *LocalSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { + if localsink.isMultiPartEntry(key) { + return true, nil + } + glog.V(4).Infof("Update Entry key: %s", key) + // do delete and create + return false, nil +} diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index dd54f0005..4ffd09462 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,7 +1,6 @@ package sink import ( - "context" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" @@ -9,12 +8,13 @@ import ( type ReplicationSink interface { GetName() string - Initialize(configuration util.Configuration) error - DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error - CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error - UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + Initialize(configuration util.Configuration, prefix string) error + DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error + CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error + UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) + IsIncremental() bool } var ( diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 4cff341d0..9a36573e3 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -11,7 +11,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" - "github.com/chrislusf/seaweedfs/weed/filer2" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" @@ -20,11 +21,13 @@ import ( ) type S3Sink struct { - conn s3iface.S3API - region string - bucket string - dir string - filerSource *source.FilerSource + conn s3iface.S3API + region string + bucket string + dir string + endpoint string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -39,16 +42,24 @@ func (s3sink *S3Sink) GetSinkToDirectory() string { return s3sink.dir } -func (s3sink *S3Sink) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory")) +func (s3sink *S3Sink) IsIncremental() bool { + return s3sink.isIncremental +} + +func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) + glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint")) + glog.V(0).Infof("sink.s3.is_incremental: %v", configuration.GetString(prefix+"is_incremental")) + s3sink.isIncremental = configuration.GetBool(prefix + "is_incremental") return s3sink.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + 
configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"endpoint"), ) } @@ -56,13 +67,16 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) { s3sink.filerSource = s } -func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error { +func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error { s3sink.region = region s3sink.bucket = bucket s3sink.dir = dir + s3sink.endpoint = endpoint config := &aws.Config{ - Region: aws.String(s3sink.region), + Region: aws.String(s3sink.region), + Endpoint: aws.String(s3sink.endpoint), + S3ForcePathStyle: aws.Bool(true), } if awsAccessKeyId != "" && awsSecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") @@ -77,7 +91,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc return nil } -func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { key = cleanKey(key) @@ -89,8 +103,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, } -func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { - +func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) if entry.IsDirectory { @@ -99,38 +112,40 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_ uploadId, err := s3sink.createMultipartUpload(key, entry) if err != nil { - return err + return fmt.Errorf("createMultipartUpload: %v", err) } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) + + parts := make([]*s3.CompletedPart, len(chunkViews)) - var parts []*s3.CompletedPart var wg sync.WaitGroup for chunkIndex, chunk := range chunkViews { partId := chunkIndex + 1 wg.Add(1) - go func(chunk *filer2.ChunkView) { + go func(chunk *filer.ChunkView, index int) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil { + if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr + glog.Errorf("uploadPart: %v", uploadErr) } else { - parts = append(parts, part) + parts[index] = part } - }(chunk) + }(chunk, chunkIndex) } wg.Wait() if err != nil { s3sink.abortMultipartUpload(key, uploadId) - return err + return fmt.Errorf("uploadPart: %v", err) } - return s3sink.completeMultipartUpload(ctx, key, uploadId, parts) + return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 
0a190b27d..3dde52616 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -24,7 +24,7 @@ func (s3sink *S3Sink) deleteObject(key string) error { result, err := s3sink.conn.DeleteObject(input) if err == nil { - glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) } @@ -43,7 +43,7 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) ( result, err := s3sink.conn.CreateMultipartUpload(input) if err == nil { - glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err) return "", err @@ -94,19 +94,20 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId result, err := s3sink.conn.CompleteMultipartUpload(input) if err == nil { - glog.V(0).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) + return fmt.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) } - return err + return nil } // To upload a part -func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { +func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) { var readSeeker io.ReadSeeker - readSeeker, err := s3sink.buildReadSeeker(ctx, chunk) + readSeeker, err := s3sink.buildReadSeeker(chunk) if err != nil { glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) @@ -122,7 +123,7 @@ func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, part result, err := s3sink.conn.UploadPart(input) if err == nil { - glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) + glog.V(2).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) } else { glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err) } @@ -156,12 +157,19 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou return err } -func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { - fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId) +func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) { + fileUrls, err := s3sink.filerSource.LookupFileId(chunk.FileId) if err != nil { return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true) + for _, fileUrl := range fileUrls { + _, err = util.ReadUrl(fileUrl, chunk.CipherKey, chunk.IsGzipped, false, chunk.Offset, 
int(chunk.Size), buf) + if err != nil { + glog.V(1).Infof("read from %s: %v", fileUrl, err) + } else { + break + } + } return bytes.NewReader(buf), nil } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index d7b5ebc4d..e2e3575dc 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,13 +3,15 @@ package source import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "google.golang.org/grpc" "io" "net/http" "strings" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -23,32 +25,41 @@ type FilerSource struct { grpcAddress string grpcDialOption grpc.DialOption Dir string + address string + proxyByFiler bool } -func (fs *FilerSource) Initialize(configuration util.Configuration) error { - return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), +func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { + return fs.DoInitialize( + "", + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + false, ) } -func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { +func (fs *FilerSource) DoInitialize(address, grpcAddress string, dir string, readChunkFromFiler bool) (err error) { + fs.address = address + if fs.address == "" { + fs.address = pb.GrpcAddressToServerAddress(grpcAddress) + } fs.grpcAddress = grpcAddress fs.Dir = dir - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + fs.proxyByFiler = readChunkFromFiler return nil } -func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) { +func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) vid := volumeId(part) - err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -62,42 +73,65 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s if err != nil { glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err) - return "", fmt.Errorf("LookupFileId volume id %s: %v", vid, err) + return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err) } locations := vid2Locations[vid] if locations == nil || len(locations.Locations) == 0 { glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err) - return "", fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err) + return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err) } - fileUrl = fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, part) + if !fs.proxyByFiler { + for _, loc := range locations.Locations { + fileUrls = append(fileUrls, fmt.Sprintf("http://%s/%s?readDeleted=true", loc.Url, part)) + } + } else { + fileUrls = append(fileUrls, 
fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, part)) + } return } -func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { +func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) { - fileUrl, err := fs.LookupFileId(ctx, part) + if fs.proxyByFiler { + return util.DownloadFile("http://" + fs.address + "/?proxyChunkId=" + fileId) + } + + fileUrls, err := fs.LookupFileId(fileId) if err != nil { return "", nil, nil, err } - filename, header, readCloser, err = util.DownloadFile(fileUrl) + for _, fileUrl := range fileUrls { + filename, header, resp, err = util.DownloadFile(fileUrl) + if err != nil { + glog.V(1).Infof("fail to read from %s: %v", fileUrl, err) + } else { + break + } + } - return filename, header, readCloser, err + return filename, header, resp, err } -func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&FilerSource{}) + +func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) } +func (fs *FilerSource) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} + func volumeId(fileId string) string { lastCommaIndex := strings.LastIndex(fileId, ",") if lastCommaIndex > 0 { diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index bed26c79c..642834c72 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -27,14 +27,14 @@ func (k *AwsSqsInput) GetName() string { return "aws_sqs" } -func (k *AwsSqsInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } @@ -68,7 +68,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que return nil } -func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { +func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { // receive message result, err := k.svc.ReceiveMessage(&sqs.ReceiveMessageInput{ @@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key 
string, message *filer_pb.EventNotif } // process the message - key = *result.Messages[0].Attributes["key"] + // fmt.Printf("messages: %+v\n", result.Messages[0]) + keyValue := result.Messages[0].MessageAttributes["key"] + key = *keyValue.StringValue text := *result.Messages[0].Body message = &filer_pb.EventNotification{} err = proto.UnmarshalText(text, message) diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index eddba9ff8..b16eec2e1 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -2,13 +2,20 @@ package sub import ( "context" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" + "github.com/streadway/amqp" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" + "gocloud.dev/pubsub/rabbitpubsub" + "net/url" + "os" + "path" + "strings" + "time" + // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" @@ -19,32 +26,139 @@ func init() { NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{}) } +func getPath(rawUrl string) string { + parsedUrl, _ := url.Parse(rawUrl) + return path.Join(parsedUrl.Host, parsedUrl.Path) +} + +func QueueDeclareAndBind(conn *amqp.Connection, exchangeUrl string, queueUrl string) error { + exchangeName := getPath(exchangeUrl) + queueName := getPath(queueUrl) + exchangeNameDLX := "DLX." + exchangeName + queueNameDLX := "DLX." + queueName + ch, err := conn.Channel() + if err != nil { + glog.Error(err) + return err + } + defer ch.Close() + if err := ch.ExchangeDeclare( + exchangeNameDLX, "fanout", false, false, false, false, nil); err != nil { + glog.Error(err) + return err + } + if err := ch.ExchangeDeclare( + exchangeName, "fanout", false, false, false, false, nil); err != nil { + glog.Error(err) + return err + } + if _, err := ch.QueueDeclare( + queueName, false, false, false, false, + amqp.Table{"x-dead-letter-exchange": exchangeNameDLX}); err != nil { + glog.Error(err) + return err + } + if err := ch.QueueBind(queueName, "", exchangeName, false, nil); err != nil { + glog.Error(err) + return err + } + if _, err := ch.QueueDeclare( + queueNameDLX, false, false, false, false, + amqp.Table{"x-dead-letter-exchange": exchangeName, "x-message-ttl": 600000}); err != nil { + glog.Error(err) + return err + } + if err := ch.QueueBind(queueNameDLX, "", exchangeNameDLX, false, nil); err != nil { + glog.Error(err) + return err + } + return nil +} + type GoCDKPubSubInput struct { - sub *pubsub.Subscription + sub *pubsub.Subscription + subURL string } func (k *GoCDKPubSubInput) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { - subURL := config.GetString("sub_url") - glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) - sub, err := pubsub.OpenSubscription(context.Background(), subURL) +func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { + topicUrl := configuration.GetString(prefix + "topic_url") + k.subURL = configuration.GetString(prefix + "sub_url") + glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", k.subURL) + sub, err := pubsub.OpenSubscription(context.Background(), k.subURL) if err != nil { return err } + var conn *amqp.Connection + if sub.As(&conn) { + ch, err := conn.Channel() + if err != nil { + return err + } + defer 
ch.Close() + _, err = ch.QueueInspect(getPath(k.subURL)) + if err != nil { + if strings.HasPrefix(err.Error(), "Exception (404) Reason") { + if err := QueueDeclareAndBind(conn, topicUrl, k.subURL); err != nil { + return err + } + } else { + return err + } + } + } k.sub = sub return nil } -func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { - msg, err := k.sub.Receive(context.Background()) +func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { + ctx := context.Background() + msg, err := k.sub.Receive(ctx) + if err != nil { + var conn *amqp.Connection + if k.sub.As(&conn) && conn.IsClosed() { + conn.Close() + k.sub.Shutdown(ctx) + conn, err = amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) + if err != nil { + glog.Error(err) + time.Sleep(time.Second) + return + } + k.sub = rabbitpubsub.OpenSubscription(conn, getPath(k.subURL), nil) + return + } + // This is permanent cached sub err + glog.Fatal(err) + } + onFailureFn = func() { + if msg.Nackable() { + isRedelivered := false + var delivery amqp.Delivery + if msg.As(&delivery) { + isRedelivered = delivery.Redelivered + glog.Warningf("onFailureFn() metadata: %+v, redelivered: %v", msg.Metadata, delivery.Redelivered) + } + if isRedelivered { + if err := delivery.Nack(false, false); err != nil { + glog.Error(err) + } + } else { + msg.Nack() + } + } + } + onSuccessFn = func() { + msg.Ack() + } key = msg.Metadata["key"] message = &filer_pb.EventNotification{} err = proto.Unmarshal(msg.Body, message) if err != nil { - return "", nil, err + return "", nil, onSuccessFn, onFailureFn, err } - return key, message, nil + return key, message, onSuccessFn, onFailureFn, nil } diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index ad6b42a2e..f7c767d4a 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string { return "google_pub_sub" } -func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } @@ -85,16 +85,22 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId go k.sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { k.messageChan <- m - m.Ack() }) return err } -func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { +func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { m := <-k.messageChan + onSuccessFn 
= func() { + m.Ack() + } + onFailureFn = func() { + m.Nack() + } + // process the message key = m.Attributes["key"] message = &filer_pb.EventNotification{} diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 1a86a8307..622a759ea 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string { return "kafka" } -func (k *KafkaInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), - configuration.GetString("offsetFile"), - configuration.GetInt("offsetSaveIntervalSeconds"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), + configuration.GetString(prefix+"offsetFile"), + configuration.GetInt(prefix+"offsetSaveIntervalSeconds"), ) } @@ -97,7 +97,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string, return nil } -func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { +func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { msg := <-k.messageChan diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 66fbef824..d5a910db9 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -9,8 +9,8 @@ type NotificationInput interface { // GetName gets the name to locate the configuration in sync.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error - ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) + Initialize(configuration util.Configuration, prefix string) error + ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) } var ( diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go new file mode 100644 index 000000000..b8af6381a --- /dev/null +++ b/weed/s3api/auth_credentials.go @@ -0,0 +1,275 @@ +package s3api + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "io/ioutil" + "net/http" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" +) + +type Action string + +type Iam interface { + Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc +} + +type IdentityAccessManagement struct { + identities []*Identity + domain string +} + +type Identity struct { + Name string + Credentials []*Credential + Actions []Action +} + +type Credential struct { + AccessKey string + SecretKey string +} + +func NewIdentityAccessManagement(option *S3ApiServerOption) 
*IdentityAccessManagement { + iam := &IdentityAccessManagement{ + domain: option.DomainName, + } + if option.Config != "" { + if err := iam.loadS3ApiConfigurationFromFile(option.Config); err != nil { + glog.Fatalf("fail to load config file %s: %v", option.Config, err) + } + } else { + if err := iam.loadS3ApiConfigurationFromFiler(option); err != nil { + glog.Warningf("fail to load config: %v", err) + } + } + return iam +} + +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) error { + content, err := filer.ReadContent(option.Filer, filer.IamConfigDirecotry, filer.IamIdentityFile) + if err != nil { + return fmt.Errorf("read S3 config: %v", err) + } + return iam.loadS3ApiConfigurationFromBytes(content) +} + +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error { + content, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + return iam.loadS3ApiConfigurationFromBytes(content) +} + +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromBytes(content []byte) error { + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil { + glog.Warningf("unmarshal error: %v", err) + return fmt.Errorf("unmarshal error: %v", err) + } + if err := iam.loadS3ApiConfiguration(s3ApiConfiguration); err != nil { + return err + } + return nil +} + +func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3ApiConfiguration) error { + var identities []*Identity + for _, ident := range config.Identities { + t := &Identity{ + Name: ident.Name, + Credentials: nil, + Actions: nil, + } + for _, action := range ident.Actions { + t.Actions = append(t.Actions, Action(action)) + } + for _, cred := range ident.Credentials { + t.Credentials = append(t.Credentials, &Credential{ + AccessKey: cred.AccessKey, + SecretKey: cred.SecretKey, + }) + } + identities = append(identities, t) + } + + // atomically switch + iam.identities = identities + return nil +} + +func (iam *IdentityAccessManagement) isEnabled() bool { + + return len(iam.identities) > 0 +} + +func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) { + + for _, ident := range iam.identities { + for _, cred := range ident.Credentials { + if cred.AccessKey == accessKey { + return ident, cred, true + } + } + } + return nil, nil, false +} + +func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) { + + for _, ident := range iam.identities { + if ident.Name == "anonymous" { + return ident, true + } + } + return nil, false +} + +func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc { + + if !iam.isEnabled() { + return f + } + + return func(w http.ResponseWriter, r *http.Request) { + identity, errCode := iam.authRequest(r, action) + if errCode == s3err.ErrNone { + if identity != nil && identity.Name != "" { + r.Header.Set(xhttp.AmzIdentityId, identity.Name) + if identity.isAdmin() { + r.Header.Set(xhttp.AmzIsAdmin, "true") + } + } + f(w, r) + return + } + writeErrorResponse(w, errCode, r.URL) + } +} + +// check whether the request has valid access keys +func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) (*Identity, s3err.ErrorCode) { + var identity *Identity + var s3Err s3err.ErrorCode + 
var found bool + switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return identity, s3err.ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + return identity, s3err.ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + return identity, s3err.ErrNone + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + return identity, s3err.ErrNotImplemented + case authTypeAnonymous: + identity, found = iam.lookupAnonymous() + if !found { + return identity, s3err.ErrAccessDenied + } + default: + return identity, s3err.ErrNotImplemented + } + + if s3Err != s3err.ErrNone { + return identity, s3Err + } + + glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions) + + bucket, _ := getBucketAndObject(r) + + if !identity.canDo(action, bucket) { + return identity, s3err.ErrAccessDenied + } + + return identity, s3err.ErrNone + +} + +func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err.ErrorCode) { + var identity *Identity + var s3Err s3err.ErrorCode + var found bool + switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return identity, s3err.ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + return identity, s3err.ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + return identity, s3err.ErrNone + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + return identity, s3err.ErrNotImplemented + case authTypeAnonymous: + identity, found = iam.lookupAnonymous() + if !found { + return identity, s3err.ErrAccessDenied + } + default: + return identity, s3err.ErrNotImplemented + } + + glog.V(3).Infof("auth error: %v", s3Err) + if s3Err != s3err.ErrNone { + return identity, s3Err + } + return identity, s3err.ErrNone +} + +func (identity *Identity) canDo(action Action, bucket string) bool { + if identity.isAdmin() { + return true + } + for _, a := range identity.Actions { + if a == action { + return true + } + } + if bucket == "" { + return false + } + limitedByBucket := string(action) + ":" + bucket + adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket + for _, a := range identity.Actions { + if string(a) == limitedByBucket { + return true + } + if string(a) == adminLimitedByBucket { + return true + } + } + return false +} + +func (identity *Identity) isAdmin() bool { + for _, a := range identity.Actions { + if a == "Admin" { + return true + } + } + return false +} diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go new file mode 100644 index 000000000..ea4b69550 --- /dev/null +++ b/weed/s3api/auth_credentials_subscribe.go @@ -0,0 +1,70 @@ +package s3api + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "io" + "time" +) + +func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) error { + + processEventFn := func(resp 
*filer_pb.SubscribeMetadataResponse) error { + + message := resp.EventNotification + if message.NewEntry == nil { + return nil + } + + dir := resp.Directory + + if message.NewParentPath != "" { + dir = message.NewParentPath + } + if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile { + if err := s3a.iam.loadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil { + return err + } + glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile) + } + + return nil + } + + for { + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: clientName, + PathPrefix: prefix, + SinceNs: lastTsNs, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + glog.Fatalf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + } + }) + if err != nil { + glog.Errorf("subscribing filer meta change: %v", err) + } + time.Sleep(time.Second) + } +} diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go new file mode 100644 index 000000000..0383ddbcd --- /dev/null +++ b/weed/s3api/auth_credentials_test.go @@ -0,0 +1,69 @@ +package s3api + +import ( + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "testing" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +func TestIdentityListFileFormat(t *testing.T) { + + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + + identity1 := &iam_pb.Identity{ + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + } + identity2 := &iam_pb.Identity{ + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_READ, + }, + } + identity3 := &iam_pb.Identity{ + Name: "some_normal_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_WRITE, + }, + } + + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3) + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, _ := m.MarshalToString(s3ApiConfiguration) + + println(text) + +} diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go new file mode 100644 index 000000000..5694a96ac --- /dev/null +++ b/weed/s3api/auth_signature_v2.go @@ -0,0 +1,427 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// The list should be alphabetically sorted +var resourceList = []string{ + "acl", + "delete", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Verify if request has valid AWS Signature Version '2'. +func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, s3err.ErrorCode) { + if isRequestSignatureV2(r) { + return iam.doesSignV2Match(r) + } + return iam.doesPresignV2SignatureMatch(r) +} + +func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.Header) s3err.ErrorCode { + accessKey := formValues.Get("AWSAccessKeyId") + _, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return s3err.ErrInvalidAccessKeyID + } + policy := formValues.Get("Policy") + signature := formValues.Get("Signature") + if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { + return s3err.ErrSignatureDoesNotMatch + } + return s3err.ErrNone +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// <HTTP-Request-URI, from the protocol name up to the query string> + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = <described below> + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false + +func validateV2AuthHeader(v2Auth string) (accessKey string, errCode s3err.ErrorCode) { + if v2Auth == "" { + return "", s3err.ErrAuthHeaderEmpty + } + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return "", s3err.ErrSignatureVersionNotSupported + } + + // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). 
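// e.g. Authorization: AWS AKIAIOSFODNN7EXAMPLE:bWq2s1WEIj+Ydj0vQ697zp+IXMU=
// (an illustrative access key and signature pair in this format)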
+ // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(v2Auth, " ") + if len(authFields) != 2 { + return "", s3err.ErrMissingFields + } + + // Then will be splitting on ":", this will separate `AWSAccessKeyId` and `Signature` string. + keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { + return "", s3err.ErrMissingFields + } + + return keySignFields[0], s3err.ErrNone +} + +func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) { + v2Auth := r.Header.Get("Authorization") + + accessKey, apiError := validateV2AuthHeader(v2Auth) + if apiError != s3err.ErrNone { + return nil, apiError + } + + // Access credentials. + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return nil, s3err.ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, s3err.ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return nil, s3err.ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return nil, s3err.ErrSignatureDoesNotMatch + } + return ident, s3err.ErrNone +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// returns ErrNone if matches. S3 errors otherwise. +func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) { + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return nil, s3err.ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return nil, s3err.ErrInvalidQueryParams + } + switch keyval[0] { + case "AWSAccessKeyId": + accessKey = keyval[1] + case "Signature": + gotSignature = keyval[1] + case "Expires": + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return nil, s3err.ErrInvalidQueryParams + } + + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Make sure the request has not expired.
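// Expires carries a Unix timestamp in seconds, e.g. Expires=1619481600 for
// 2021-04-27T00:00:00Z (illustrative value); the check below compares it
// against time.Now().UTC().Unix().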
+	expiresInt, err := strconv.ParseInt(expires, 10, 64)
+	if err != nil {
+		return nil, s3err.ErrMalformedExpires
+	}
+
+	// Check if the presigned URL has expired.
+	if expiresInt < time.Now().UTC().Unix() {
+		return nil, s3err.ErrExpiredPresignRequest
+	}
+
+	encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+	if err != nil {
+		return nil, s3err.ErrInvalidRequest
+	}
+
+	expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
+	if !compareSignatureV2(gotSignature, expectedSignature) {
+		return nil, s3err.ErrSignatureDoesNotMatch
+	}
+
+	return ident, s3err.ErrNone
+}
+
+// unescapeQueries splits encodedQuery into a list of unescaped query params; it
+// returns an error if any value fails to unescape.
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) {
+	for _, query := range strings.Split(encodedQuery, "&") {
+		var unescapedQuery string
+		unescapedQuery, err = url.QueryUnescape(query)
+		if err != nil {
+			return nil, err
+		}
+		unescapedQueries = append(unescapedQueries, unescapedQuery)
+	}
+	return unescapedQueries, nil
+}
+
+// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
+func getResource(path string, host string, domain string) (string, error) {
+	if domain == "" {
+		return path, nil
+	}
+	// If virtual-host-style is enabled, construct the "resource" properly.
+	if strings.Contains(host, ":") {
+		// In bucket.mydomain.com:9000, strip out :9000
+		var err error
+		if host, _, err = net.SplitHostPort(host); err != nil {
+			return "", err
+		}
+	}
+	if !strings.HasSuffix(host, "."+domain) {
+		return path, nil
+	}
+	bucket := strings.TrimSuffix(host, "."+domain)
+	return "/" + pathJoin(bucket, path), nil
+}
+
+// pathJoin - like path.Join() but retains the trailing "/" of the last element
+func pathJoin(elem ...string) string {
+	trailingSlash := ""
+	if len(elem) > 0 {
+		if strings.HasSuffix(elem[len(elem)-1], "/") {
+			trailingSlash = "/"
+		}
+	}
+	return path.Join(elem...) + trailingSlash
+}
+
+// Return the signature v2 of a given request.
+func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string {
+	stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "")
+	signature := calculateSignatureV2(stringToSign, cred.SecretKey)
+	return signature
+}
+
+// Return the string to sign under two different conditions:
+// - if the expires string is set, it is used in place of the Date header.
+// - if the expires string is empty, the Date header is used instead.
+func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string {
+	canonicalHeaders := canonicalizedAmzHeadersV2(headers)
+	if len(canonicalHeaders) > 0 {
+		canonicalHeaders += "\n"
+	}
+
+	date := expires // Date is set to the expires date for presign operations.
+	if date == "" {
+		// If the expires date is empty then the request header Date is used.
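+		// The Date header is typically in RFC 1123 form, e.g. "Tue, 27 Mar 2007 19:36:42 +0000".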
+		date = headers.Get("Date")
+	}
+
+	// From the Amazon docs:
+	//
+	// StringToSign = HTTP-Verb + "\n" +
+	// 	 Content-Md5 + "\n" +
+	//	 Content-Type + "\n" +
+	//	 Date/Expires + "\n" +
+	//	 CanonicalizedProtocolHeaders +
+	//	 CanonicalizedResource;
+	stringToSign := strings.Join([]string{
+		method,
+		headers.Get("Content-MD5"),
+		headers.Get("Content-Type"),
+		date,
+		canonicalHeaders,
+	}, "\n")
+
+	return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery)
+}
+
+// Return canonical resource string.
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string {
+	queries := strings.Split(encodedQuery, "&")
+	keyval := make(map[string]string)
+	for _, query := range queries {
+		key := query
+		val := ""
+		index := strings.Index(query, "=")
+		if index != -1 {
+			key = query[:index]
+			val = query[index+1:]
+		}
+		keyval[key] = val
+	}
+
+	var canonicalQueries []string
+	for _, key := range resourceList {
+		val, ok := keyval[key]
+		if !ok {
+			continue
+		}
+		if val == "" {
+			canonicalQueries = append(canonicalQueries, key)
+			continue
+		}
+		canonicalQueries = append(canonicalQueries, key+"="+val)
+	}
+
+	// The queries are already sorted because resourceList is sorted; if canonicalQueries
+	// is empty, strings.Join returns an empty string.
+	canonicalQuery := strings.Join(canonicalQueries, "&")
+	if canonicalQuery != "" {
+		return encodedResource + "?" + canonicalQuery
+	}
+	return encodedResource
+}
+
+// Return canonical headers.
+func canonicalizedAmzHeadersV2(headers http.Header) string {
+	var keys []string
+	keyval := make(map[string]string)
+	for key := range headers {
+		lkey := strings.ToLower(key)
+		if !strings.HasPrefix(lkey, "x-amz-") {
+			continue
+		}
+		keys = append(keys, lkey)
+		keyval[lkey] = strings.Join(headers[key], ",")
+	}
+	sort.Strings(keys)
+	var canonicalHeaders []string
+	for _, key := range keys {
+		canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key])
+	}
+	return strings.Join(canonicalHeaders, "\n")
+}
+
+func calculateSignatureV2(stringToSign string, secret string) string {
+	hm := hmac.New(sha1.New, []byte(secret))
+	hm.Write([]byte(stringToSign))
+	return base64.StdEncoding.EncodeToString(hm.Sum(nil))
+}
+
+// compareSignatureV2 returns true if and only if both signatures
+// are equal. The signatures are expected to be base64 encoded strings
+// according to the AWS S3 signature V2 spec.
+func compareSignatureV2(sig1, sig2 string) bool {
+	// Decoding the signature strings to their binary byte-sequence representation
+	// is required because the Base64 encoding of a value is not unique:
+	// for example "aGVsbG8=" and "aGVsbG8=\r" decode to the same byte slice.
+	signature1, err := base64.StdEncoding.DecodeString(sig1)
+	if err != nil {
+		return false
+	}
+	signature2, err := base64.StdEncoding.DecodeString(sig2)
+	if err != nil {
+		return false
+	}
+	return subtle.ConstantTimeCompare(signature1, signature2) == 1
+}
+
+// Return signature-v2 for the presigned request.
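+// It reuses getStringToSignV2, with the Expires value taking the place of the
+// Date header in the string to sign.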
+func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go new file mode 100644 index 000000000..0df26e6fc --- /dev/null +++ b/weed/s3api/auth_signature_v4.go @@ -0,0 +1,770 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) { + sha256sum := getContentSha256Cksum(r) + switch { + case isRequestSignatureV4(r): + return iam.doesSignatureMatch(sha256sum, r) + case isRequestPresignedSignatureV4(r): + return iam.doesPresignedSignatureMatch(sha256sum, r) + } + return nil, s3err.ErrAccessDenied +} + +// Streaming AWS Signature Version '4' constants. +const ( + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + + // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the + // client did not calculate sha256 of the payload. + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request) string { + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.URL.Query()["X-Amz-Content-Sha256"] + if !ok { + v, ok = r.Header["X-Amz-Content-Sha256"] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = emptySHA256 + v, ok = r.Header["X-Amz-Content-Sha256"] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. 
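+	// A V4 header has the following shape (credentials and signature hypothetical):
+	//
+	//   Authorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20210426/us-east-1/s3/aws4_request,
+	//       SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=<64-hex-digit-hmac>
+	//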
+ v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth) + if err != s3err.ErrNone { + return nil, err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, errCode + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" { + if date = r.Header.Get("Date"); date == "" { + return nil, s3err.ErrMissingDateHeader + } + } + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return nil, s3err.ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get hashed Payload + if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { + buf, _ := ioutil.ReadAll(r.Body) + r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) + b, _ := ioutil.ReadAll(bytes.NewBuffer(buf)) + if len(b) != 0 { + bodyHash := sha256.Sum256(b) + hashedPayload = hex.EncodeToString(bodyHash[:]) + } + } + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, + signV4Values.Credential.scope.date, + signV4Values.Credential.scope.region, + signV4Values.Credential.scope.service) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + + // Return error none. + return identity, s3err.ErrNone +} + +// credentialHeader data type represents structured form of Credential +// string from authorization header. +type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } +} + +// signValues data type represents structured form of AWS Signature V4 header. +type signValues struct { + Credential credentialHeader + SignedHeaders []string + Signature string +} + +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, "/") +} + +// Authorization: algorithm Credential=accessKeyID/credScope, \ +// SignedHeaders=signedHeaders, Signature=signature +// +func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) { + // Replace all spaced strings, some clients can send spaced + // parameters and some won't. So we pro-actively remove any spaces + // to make parsing easier. + v4Auth = strings.Replace(v4Auth, " ", "", -1) + if v4Auth == "" { + return sv, s3err.ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v4Auth, signV4Algorithm) { + return sv, s3err.ErrSignatureVersionNotSupported + } + + // Strip off the Algorithm prefix. 
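+	// What remains is three comma-separated fields:
+	// Credential=<access-key>/<scope>, SignedHeaders=<h1;h2;...>, Signature=<hex>.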
+	v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+	authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+	if len(authFields) != 3 {
+		return sv, s3err.ErrMissingFields
+	}
+
+	// Initialize signature version '4' structured header.
+	signV4Values := signValues{}
+
+	var err s3err.ErrorCode
+	// Save credential values.
+	signV4Values.Credential, err = parseCredentialHeader(authFields[0])
+	if err != s3err.ErrNone {
+		return sv, err
+	}
+
+	// Save signed headers.
+	signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
+	if err != s3err.ErrNone {
+		return sv, err
+	}
+
+	// Save signature.
+	signV4Values.Signature, err = parseSignature(authFields[2])
+	if err != s3err.ErrNone {
+		return sv, err
+	}
+
+	// Return the structure here.
+	return signV4Values, s3err.ErrNone
+}
+
+// parse credentialHeader string into its structured form.
+func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.ErrorCode) {
+	creds := strings.Split(strings.TrimSpace(credElement), "=")
+	if len(creds) != 2 {
+		return ch, s3err.ErrMissingFields
+	}
+	if creds[0] != "Credential" {
+		return ch, s3err.ErrMissingCredTag
+	}
+	credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
+	if len(credElements) != 5 {
+		return ch, s3err.ErrCredMalformed
+	}
+	// Save access key id.
+	cred := credentialHeader{
+		accessKey: credElements[0],
+	}
+	var e error
+	cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
+	if e != nil {
+		return ch, s3err.ErrMalformedCredentialDate
+	}
+
+	cred.scope.region = credElements[2]
+	cred.scope.service = credElements[3] // "s3"
+	cred.scope.request = credElements[4] // "aws4_request"
+	return cred, s3err.ErrNone
+}
+
+// Parse slice of signed headers from signed headers tag.
+func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) {
+	signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
+	if len(signedHdrFields) != 2 {
+		return nil, s3err.ErrMissingFields
+	}
+	if signedHdrFields[0] != "SignedHeaders" {
+		return nil, s3err.ErrMissingSignHeadersTag
+	}
+	if signedHdrFields[1] == "" {
+		return nil, s3err.ErrMissingFields
+	}
+	signedHeaders := strings.Split(signedHdrFields[1], ";")
+	return signedHeaders, s3err.ErrNone
+}
+
+// Parse signature from signature tag.
+func parseSignature(signElement string) (string, s3err.ErrorCode) {
+	signFields := strings.Split(strings.TrimSpace(signElement), "=")
+	if len(signFields) != 2 {
+		return "", s3err.ErrMissingFields
+	}
+	if signFields[0] != "Signature" {
+		return "", s3err.ErrMissingSignTag
+	}
+	if signFields[1] == "" {
+		return "", s3err.ErrMissingFields
+	}
+	signature := signFields[1]
+	return signature, s3err.ErrNone
+}
+
+// doesPolicySignatureV4Match - Verify query headers with post policy
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+// returns ErrNone if the signature matches.
+func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.Header) s3err.ErrorCode {
+
+	// Parse credential tag.
+	credHeader, err := parseCredentialHeader("Credential=" + formValues.Get("X-Amz-Credential"))
+	if err != s3err.ErrNone {
+		return s3err.ErrMissingFields
+	}
+
+	_, cred, found := iam.lookupByAccessKey(credHeader.accessKey)
+	if !found {
+		return s3err.ErrInvalidAccessKeyID
+	}
+
+	// Get signing key.
+	signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service)
+
+	// Get signature.
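+	// For POST policies the string to sign is the base64-encoded policy document
+	// itself; no canonical request is constructed.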
+ newSignature := getSignature(signingKey, formValues.Get("Policy")) + + // Verify signature. + if !compareSignatureV4(newSignature, formValues.Get("X-Amz-Signature")) { + return s3err.ErrSignatureDoesNotMatch + } + + // Success. + return s3err.ErrNone +} + +// check query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { + + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.URL.Query()) + if err != s3err.ErrNone { + return nil, err + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, errCode + } + // Construct new query. + query := make(url.Values) + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + query.Set("X-Amz-Content-Sha256", hashedPayload) + } + + query.Set("X-Amz-Algorithm", signV4Algorithm) + + now := time.Now().UTC() + + // If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(now.Add(15 * time.Minute)) { + return nil, s3err.ErrRequestNotReadyYet + } + + if now.Sub(pSignValues.Date) > pSignValues.Expires { + return nil, s3err.ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct the query. + query.Set("X-Amz-Date", t.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) + query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region)) + + // Save other headers available in the request parameters. + for k, v := range req.URL.Query() { + + // Handle the metadata in presigned put query string + if strings.Contains(strings.ToLower(k), "x-amz-meta-") { + query.Set(k, v[0]) + } + + if strings.HasPrefix(strings.ToLower(k), "x-amz") { + continue + } + query[k] = v + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") { + return nil, s3err.ErrContentSHA256Mismatch + } + } + + /// Verify finally if signature is same. + + // Get canonical request. 
+	presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)
+
+	// Get string to sign from canonical request.
+	presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
+
+	// Get hmac presigned signing key.
+	presignedSigningKey := getSigningKey(cred.SecretKey,
+		pSignValues.Credential.scope.date,
+		pSignValues.Credential.scope.region,
+		pSignValues.Credential.scope.service)
+
+	// Get new signature.
+	newSignature := getSignature(presignedSigningKey, presignedStringToSign)
+
+	// Verify signature.
+	if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) {
+		return nil, s3err.ErrSignatureDoesNotMatch
+	}
+	return identity, s3err.ErrNone
+}
+
+func contains(list []string, elem string) bool {
+	for _, t := range list {
+		if t == elem {
+			return true
+		}
+	}
+	return false
+}
+
+// preSignValues data type represents the structured form of an AWS Signature V4 query string.
+type preSignValues struct {
+	signValues
+	Date time.Time
+	Expires time.Duration
+}
+
+// Parses signature version '4' query string of the following form.
+//
+//   querystring = X-Amz-Algorithm=algorithm
+//   querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
+//   querystring += &X-Amz-Date=date
+//   querystring += &X-Amz-Expires=timeout interval
+//   querystring += &X-Amz-SignedHeaders=signed_headers
+//   querystring += &X-Amz-Signature=signature
+//
+// verifies if any of the necessary query params are missing in the presigned request.
+func doesV4PresignParamsExist(query url.Values) s3err.ErrorCode {
+	v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"}
+	for _, v4PresignQueryParam := range v4PresignQueryParams {
+		if _, ok := query[v4PresignQueryParam]; !ok {
+			return s3err.ErrInvalidQueryParams
+		}
+	}
+	return s3err.ErrNone
+}
+
+// Parses all the presigned signature values into separate elements.
+func parsePreSignV4(query url.Values) (psv preSignValues, aec s3err.ErrorCode) {
+	var err s3err.ErrorCode
+	// verify whether the required query params exist.
+	err = doesV4PresignParamsExist(query)
+	if err != s3err.ErrNone {
+		return psv, err
+	}
+
+	// Verify if the query algorithm is supported or not.
+	if query.Get("X-Amz-Algorithm") != signV4Algorithm {
+		return psv, s3err.ErrInvalidQuerySignatureAlgo
+	}
+
+	// Initialize signature version '4' structured header.
+	preSignV4Values := preSignValues{}
+
+	// Save credential.
+	preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
+	if err != s3err.ErrNone {
+		return psv, err
+	}
+
+	var e error
+	// Save date in native time.Time.
+	preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
+	if e != nil {
+		return psv, s3err.ErrMalformedPresignedDate
+	}
+
+	// Save expires in native time.Duration.
+	preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
+	if e != nil {
+		return psv, s3err.ErrMalformedExpires
+	}
+
+	if preSignV4Values.Expires < 0 {
+		return psv, s3err.ErrNegativeExpires
+	}
+
+	// Check that the expiry time does not exceed 7 days (value in seconds).
+	if preSignV4Values.Expires.Seconds() > 604800 {
+		return psv, s3err.ErrMaximumExpires
+	}
+
+	// Save signed headers.
+	preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
+	if err != s3err.ErrNone {
+		return psv, err
+	}
+
+	// Save signature.
+	preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
+	if err != s3err.ErrNone {
+		return psv, err
+	}
+
+	// Return structured form of signature query string.
+	return preSignV4Values, s3err.ErrNone
+}
+
+// extractSignedHeaders extracts signed headers from the Authorization header
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, s3err.ErrorCode) {
+	reqHeaders := r.Header
+	// find whether "host" is part of the list of signed headers.
+	// if not, return ErrUnsignedHeaders. "host" is mandatory.
+	if !contains(signedHeaders, "host") {
+		return nil, s3err.ErrUnsignedHeaders
+	}
+	extractedSignedHeaders := make(http.Header)
+	for _, header := range signedHeaders {
+		// `host` will not be found in the headers, but can be found in r.Host;
+		// the list of signed headers must always contain it.
+		val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+		if ok {
+			for _, enc := range val {
+				extractedSignedHeaders.Add(header, enc)
+			}
+			continue
+		}
+		switch header {
+		case "expect":
+			// The Golang http server strips off the 'Expect' header; if the
+			// client sent this as part of the signed headers we need to
+			// handle it, otherwise we would see a signature mismatch.
+			// `aws-cli` sets this as part of signed headers.
+			//
+			// According to
+			// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
+			// Expect header is always of form:
+			//
+			//   Expect = "Expect" ":" 1#expectation
+			//   expectation = "100-continue" | expectation-extension
+			//
+			// So it is safe to assume that '100-continue' is what would
+			// be sent; for the time being keep this workaround.
+			// Adding a *TODO* to remove this later when the Golang server
+			// doesn't filter out the 'Expect' header.
+			extractedSignedHeaders.Set(header, "100-continue")
+		case "host":
+			// Go http server removes "host" from Request.Header
+			extractedSignedHeaders.Set(header, r.Host)
+		case "transfer-encoding":
+			for _, enc := range r.TransferEncoding {
+				extractedSignedHeaders.Add(header, enc)
+			}
+		case "content-length":
+			// Signature-V4 spec excludes Content-Length from the signed headers list for signature calculation.
+			// But some clients deviate from this rule. Hence we consider Content-Length for signature
+			// calculation to be compatible with such clients.
+			extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
+		default:
+			return nil, s3err.ErrUnsignedHeaders
+		}
+	}
+	return extractedSignedHeaders, s3err.ErrNone
+}
+
+// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names
+func getSignedHeaders(signedHeaders http.Header) string {
+	var headers []string
+	for k := range signedHeaders {
+		headers = append(headers, strings.ToLower(k))
+	}
+	sort.Strings(headers)
+	return strings.Join(headers, ";")
+}
+
+// getScope generates a scope string from a specific date, an AWS region, and a service.
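+// e.g. a request signed on 2021-04-26 in us-east-1 yields
+// "20210426/us-east-1/s3/aws4_request" (date hypothetical).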
+func getScope(t time.Time, region string) string {
+	scope := strings.Join([]string{
+		t.Format(yyyymmdd),
+		region,
+		"s3",
+		"aws4_request",
+	}, "/")
+	return scope
+}
+
+// getCanonicalRequest generates a canonical request of the style
+//
+//   canonicalRequest =
+//     <HTTPMethod>\n
+//     <CanonicalURI>\n
+//     <CanonicalQueryString>\n
+//     <CanonicalHeaders>\n
+//     <SignedHeaders>\n
+//     <HashedPayload>
+//
+func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
+	rawQuery := strings.Replace(queryStr, "+", "%20", -1)
+	encodedPath := encodePath(urlPath)
+	canonicalRequest := strings.Join([]string{
+		method,
+		encodedPath,
+		rawQuery,
+		getCanonicalHeaders(extractedSignedHeaders),
+		getSignedHeaders(extractedSignedHeaders),
+		payload,
+	}, "\n")
+	return canonicalRequest
+}
+
+// getStringToSign generates the string to sign based on selected query values.
+func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
+	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
+	stringToSign = stringToSign + scope + "\n"
+	canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
+	stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
+	return stringToSign
+}
+
+// sumHMAC calculates the HMAC of data keyed with key.
+func sumHMAC(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getSigningKey derives the hmac signing key used to calculate the final signature.
+func getSigningKey(secretKey string, t time.Time, region string, service string) []byte {
+	date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
+	regionBytes := sumHMAC(date, []byte(region))
+	serviceBytes := sumHMAC(regionBytes, []byte(service))
+	signingKey := sumHMAC(serviceBytes, []byte("aws4_request"))
+	return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
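+// Under the hood the V4 key derivation above chains four HMACs (values hypothetical):
+// HMAC("AWS4"+secret, "20210426") -> HMAC(.., "us-east-1") -> HMAC(.., "s3") -> HMAC(.., "aws4_request"),
+// and getSignature then hex-encodes HMAC(signingKey, stringToSign).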
+func getSignature(signingKey []byte, stringToSign string) string {
+	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getCanonicalHeaders generates a list of request headers with their values
+func getCanonicalHeaders(signedHeaders http.Header) string {
+	var headers []string
+	vals := make(http.Header)
+	for k, vv := range signedHeaders {
+		headers = append(headers, strings.ToLower(k))
+		vals[strings.ToLower(k)] = vv
+	}
+	sort.Strings(headers)
+
+	var buf bytes.Buffer
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		for idx, v := range vals[k] {
+			if idx > 0 {
+				buf.WriteByte(',')
+			}
+			buf.WriteString(signV4TrimAll(v))
+		}
+		buf.WriteByte('\n')
+	}
+	return buf.String()
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+	// Compress adjacent spaces (a space is determined by
+	// unicode.IsSpace() internally here) to one space and return
+	return strings.Join(strings.Fields(input), " ")
+}
+
+// if the object matches the reserved string, there is no need to encode it
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// encodePath encodes a string from its UTF-8 byte representation to percent-encoded hex escape sequences
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the way url.Encode() is written
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func encodePath(pathName string) string {
+	if reservedObjectNames.MatchString(pathName) {
+		return pathName
+	}
+	var encodedPathname string
+	for _, s := range pathName {
+		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		}
+		switch s {
+		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+			encodedPathname = encodedPathname + string(s)
+			continue
+		default:
+			len := utf8.RuneLen(s)
+			if len < 0 {
+				// if utf8 cannot convert, return the same string as is
+				return pathName
+			}
+			u := make([]byte, len)
+			utf8.EncodeRune(u, s)
+			for _, r := range u {
+				hex := hex.EncodeToString([]byte{r})
+				encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+			}
+		}
+	}
+	return encodedPathname
+}
+
+// compareSignatureV4 returns true if and only if both signatures
+// are equal. The signatures are expected to be HEX encoded strings
+// according to the AWS S3 signature V4 spec.
+func compareSignatureV4(sig1, sig2 string) bool {
+	// The constant-time compare using []byte(str) works because the hex encoding
+	// is unique for a sequence of bytes. See also compareSignatureV2.
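+	// (Unlike V2, the signatures here are hex strings and can be compared without decoding.)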
+	return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1
+}
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
new file mode 100644
index 000000000..b47cd5f2d
--- /dev/null
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -0,0 +1,421 @@
+package s3api
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+	"unicode/utf8"
+)
+
+// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection.
+func TestIsRequestPresignedSignatureV4(t *testing.T) {
+	testCases := []struct {
+		inputQueryKey string
+		inputQueryValue string
+		expectedResult bool
+	}{
+		// Test case - 1.
+		// Test case with query key "X-Amz-Credential" not set.
+		{"", "", false},
+		// Test case - 2.
+		{"X-Amz-Credential", "", true},
+		// Test case - 3.
+		{"X-Amz-Content-Sha256", "", false},
+	}
+
+	for i, testCase := range testCases {
+		// creating an input HTTP request.
+		// Only the query parameters are relevant for this particular test.
+		inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+		if err != nil {
+			t.Fatalf("Error initializing input HTTP request: %v", err)
+		}
+		q := inputReq.URL.Query()
+		q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
+		inputReq.URL.RawQuery = q.Encode()
+
+		actualResult := isRequestPresignedSignatureV4(inputReq)
+		if testCase.expectedResult != actualResult {
+			t.Errorf("Test %d: Expected the result to be `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
+		}
+	}
+}
+
+// Tests the request authentication function and verifies the S3 error replies.
+func TestIsReqAuthenticated(t *testing.T) {
+	option := S3ApiServerOption{}
+	iam := NewIdentityAccessManagement(&option)
+	iam.identities = []*Identity{
+		{
+			Name: "someone",
+			Credentials: []*Credential{
+				{
+					AccessKey: "access_key_1",
+					SecretKey: "secret_key_1",
+				},
+			},
+			Actions: nil,
+		},
+	}
+
+	// List of test cases for validating http request authentication.
+	testCases := []struct {
+		req *http.Request
+		s3Error s3err.ErrorCode
+	}{
+		// When the request is unsigned, access denied is returned.
+		{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied},
+		// When the request is properly signed, the error is none.
+		{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone},
+	}
+
+	// Validates all testcases.
+	for i, testCase := range testCases {
+		if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
+			ioutil.ReadAll(testCase.req.Body)
+			t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
+		}
+	}
+}
+
+func TestCheckAdminRequestAuthType(t *testing.T) {
+	option := S3ApiServerOption{}
+	iam := NewIdentityAccessManagement(&option)
+	iam.identities = []*Identity{
+		{
+			Name: "someone",
+			Credentials: []*Credential{
+				{
+					AccessKey: "access_key_1",
+					SecretKey: "secret_key_1",
+				},
+			},
+			Actions: nil,
+		},
+	}
+
+	testCases := []struct {
+		Request *http.Request
+		ErrCode s3err.ErrorCode
+	}{
+		{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied},
+		{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+		{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+	}
+	for i, testCase := range testCases {
+		if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {
+			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
+		}
+	}
+}
+
+// Provides a fully populated http request instance, fails otherwise.
+func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+	req, err := newTestRequest(method, urlStr, contentLength, body)
+	if err != nil {
+		t.Fatalf("Unable to initialize new http request %s", err)
+	}
+	return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is signed with AWS Signature V4, fails if not able to do so.
+func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+	req := mustNewRequest(method, urlStr, contentLength, body, t)
+	cred := &Credential{"access_key_1", "secret_key_1"}
+	if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
+		t.Fatalf("Unable to initialize new signed http request %s", err)
+	}
+	return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is presigned with AWS Signature V4, fails if not able to do so.
+func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+	req := mustNewRequest(method, urlStr, contentLength, body, t)
+	cred := &Credential{"access_key_1", "secret_key_1"}
+	if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil {
+		t.Fatalf("Unable to initialize new presigned http request %s", err)
+	}
+	return req
+}
+
+// Returns new HTTP request object.
+func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
+	if method == "" {
+		method = "POST"
+	}
+
+	// Save for subsequent use
+	var hashedPayload string
+	var md5Base64 string
+	switch {
+	case body == nil:
+		hashedPayload = getSHA256Hash([]byte{})
+	default:
+		payloadBytes, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, err
+		}
+		hashedPayload = getSHA256Hash(payloadBytes)
+		md5Base64 = getMD5HashBase64(payloadBytes)
+	}
+	// Seek back to beginning.
+	if body != nil {
+		body.Seek(0, 0)
+	} else {
+		body = bytes.NewReader([]byte(""))
+	}
+	req, err := http.NewRequest(method, urlStr, body)
+	if err != nil {
+		return nil, err
+	}
+	if md5Base64 != "" {
+		req.Header.Set("Content-Md5", md5Base64)
+	}
+	req.Header.Set("x-amz-content-sha256", hashedPayload)
+
+	// Add Content-Length
+	req.ContentLength = contentLength
+
+	return req, nil
+}
+
+// getSHA256Hash returns SHA-256 hash in hex encoding of given data.
+func getSHA256Hash(data []byte) string {
+	return hex.EncodeToString(getSHA256Sum(data))
+}
+
+// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
+func getMD5HashBase64(data []byte) string {
+	return base64.StdEncoding.EncodeToString(getMD5Sum(data))
+}
+
+// getSHA256Sum returns SHA-256 sum of given data.
+func getSHA256Sum(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getMD5Sum returns MD5 sum of given data.
+func getMD5Sum(data []byte) []byte {
+	hash := md5.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getMD5Hash returns MD5 hash in hex encoding of given data.
+func getMD5Hash(data []byte) string {
+	return hex.EncodeToString(getMD5Sum(data))
+}
+
+var ignoredHeaders = map[string]bool{
+	"Authorization": true,
+	"Content-Type": true,
+	"Content-Length": true,
+	"User-Agent": true,
+}
+
+// Sign given request using Signature V4.
+func signRequestV4(req *http.Request, accessKey, secretKey string) error {
+	// Get hashed payload.
+	hashedPayload := req.Header.Get("x-amz-content-sha256")
+	if hashedPayload == "" {
+		return fmt.Errorf("Invalid hashed payload")
+	}
+
+	currTime := time.Now()
+
+	// Set x-amz-date.
+	req.Header.Set("x-amz-date", currTime.Format(iso8601Format))
+
+	// Get header map.
+	headerMap := make(map[string][]string)
+	for k, vv := range req.Header {
+		// If the request header key is not in the ignored headers, then add it.
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok {
+			headerMap[strings.ToLower(k)] = vv
+		}
+	}
+
+	// Get header keys.
+	headers := []string{"host"}
+	for k := range headerMap {
+		headers = append(headers, k)
+	}
+	sort.Strings(headers)
+
+	region := "us-east-1"
+
+	// Get canonical headers.
+	var buf bytes.Buffer
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		switch {
+		case k == "host":
+			buf.WriteString(req.URL.Host)
+			fallthrough
+		default:
+			for idx, v := range headerMap[k] {
+				if idx > 0 {
+					buf.WriteByte(',')
+				}
+				buf.WriteString(v)
+			}
+			buf.WriteByte('\n')
+		}
+	}
+	canonicalHeaders := buf.String()
+
+	// Get signed headers.
+	signedHeaders := strings.Join(headers, ";")
+
+	// Get canonical query string.
+	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+
+	// Get canonical URI.
+	canonicalURI := EncodePath(req.URL.Path)
+
+	// Get canonical request.
+	//   canonicalRequest =
+	//     <HTTPMethod>\n
+	//     <CanonicalURI>\n
+	//     <CanonicalQueryString>\n
+	//     <CanonicalHeaders>\n
+	//     <SignedHeaders>\n
+	//     <HashedPayload>
+	//
+	canonicalRequest := strings.Join([]string{
+		req.Method,
+		canonicalURI,
+		req.URL.RawQuery,
+		canonicalHeaders,
+		signedHeaders,
+		hashedPayload,
+	}, "\n")
+
+	// Get scope.
+	scope := strings.Join([]string{
+		currTime.Format(yyyymmdd),
+		region,
+		"s3",
+		"aws4_request",
+	}, "/")
+
+	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
+	stringToSign = stringToSign + scope + "\n"
+	stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
+
+	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
+	regionHMAC := sumHMAC(date, []byte(region))
+	service := sumHMAC(regionHMAC, []byte("s3"))
+	signingKey := sumHMAC(service, []byte("aws4_request"))
+
+	signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+
+	// final Authorization header
+	parts := []string{
+		"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
+		"SignedHeaders=" + signedHeaders,
+		"Signature=" + signature,
+	}
+	auth := strings.Join(parts, ", ")
+	req.Header.Set("Authorization", auth)
+
+	return nil
+}
+
+// preSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return errors.New("Presign cannot be generated without access and secret keys")
+	}
+
+	region := "us-east-1"
+	date := time.Now().UTC()
+	scope := getScope(date, region)
+	credential := fmt.Sprintf("%s/%s", accessKeyID, scope)
+
+	// Set URL query.
+	query := req.URL.Query()
+	query.Set("X-Amz-Algorithm", signV4Algorithm)
+	query.Set("X-Amz-Date", date.Format(iso8601Format))
+	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+	query.Set("X-Amz-SignedHeaders", "host")
+	query.Set("X-Amz-Credential", credential)
+	query.Set("X-Amz-Content-Sha256", unsignedPayload)
+
+	// "host" is the only header required to be signed for Presigned URLs.
+	extractedSignedHeaders := make(http.Header)
+	extractedSignedHeaders.Set("host", req.Host)
+
+	queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
+	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
+	stringToSign := getStringToSign(canonicalRequest, date, scope)
+	signingKey := getSigningKey(secretAccessKey, date, region, "s3")
+	signature := getSignature(signingKey, stringToSign)
+
+	req.URL.RawQuery = query.Encode()
+
+	// Add the signature header to RawQuery.
+	req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature)
+
+	// The request URL now carries the complete presigned query.
+	return nil
+}
+
+// EncodePath encodes a string from its UTF-8 byte representation to percent-encoded hex escape sequences
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the way url.Encode() is written
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index 061fd4a92..b163ec2f6 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -21,17 +21,116 @@ package s3api import ( "bufio" "bytes" + "crypto/sha256" + "encoding/hex" "errors" - "github.com/dustin/go-humanize" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "hash" "io" "net/http" -) + "time" -// Streaming AWS Signature Version '4' constants. -const ( - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + "github.com/dustin/go-humanize" ) +// getChunkSignature - get chunk signature. +func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string { + + // Calculate string to sign. + stringToSign := signV4ChunkedAlgorithm + "\n" + + date.Format(iso8601Format) + "\n" + + getScope(date, region) + "\n" + + seedSignature + "\n" + + emptySHA256 + "\n" + + hashedChunk + + // Get hmac signing key. + signingKey := getSigningKey(secretKey, date, region, "s3") + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + return newSignature +} + +// calculateSeedSignature - Calculate seed signature in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +// returns signature, error otherwise if the signature mismatches or any other +// error while parsing and validating. +func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode s3err.ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, errCode := parseSignV4(v4Auth) + if errCode != s3err.ErrNone { + return nil, "", "", time.Time{}, errCode + } + + // Payload streaming. + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get("X-Amz-Content-Sha256") { + return nil, "", "", time.Time{}, s3err.ErrContentSHA256Mismatch + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, "", "", time.Time{}, errCode + } + // Verify if the access key id matches. + _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID + } + + // Verify if region is valid. 
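+	// (the region is taken from the credential scope as-is; no separate allow-list is checked here).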
+	region = signV4Values.Credential.scope.region
+
+	// Extract the date; if not present, return an error.
+	var dateStr string
+	if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
+		if dateStr = r.Header.Get("Date"); dateStr == "" {
+			return nil, "", "", time.Time{}, s3err.ErrMissingDateHeader
+		}
+	}
+	// Parse date header.
+	var err error
+	date, err = time.Parse(iso8601Format, dateStr)
+	if err != nil {
+		return nil, "", "", time.Time{}, s3err.ErrMalformedDate
+	}
+
+	// Query string.
+	queryStr := req.URL.Query().Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3")
+
+	// Calculate signature.
+	newSignature := getSignature(signingKey, stringToSign)
+
+	// Verify if the signatures match.
+	if !compareSignatureV4(newSignature, signV4Values.Signature) {
+		return nil, "", "", time.Time{}, s3err.ErrSignatureDoesNotMatch
+	}
+
+	// Return the calculated signature.
+	return cred, newSignature, region, date, s3err.ErrNone
+}
+
 const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
 
 // lineTooLong is generated as chunk header is bigger than 4KiB.
@@ -43,22 +142,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
 // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func newSignV4ChunkedReader(req *http.Request) io.ReadCloser {
-	return &s3ChunkedReader{
-		reader: bufio.NewReader(req.Body),
-		state: readChunkHeader,
+func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) {
+	ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
+	if errCode != s3err.ErrNone {
+		return nil, errCode
 	}
+	return &s3ChunkedReader{
+		cred: ident,
+		reader: bufio.NewReader(req.Body),
+		seedSignature: seedSignature,
+		seedDate: seedDate,
+		region: region,
+		chunkSHA256Writer: sha256.New(),
+		state: readChunkHeader,
+	}, s3err.ErrNone
 }
 
 // Represents the overall state that is required for decoding an
 // AWS Signature V4 chunked reader.
 type s3ChunkedReader struct {
-	reader *bufio.Reader
-	state chunkState
-	lastChunk bool
-	chunkSignature string
-	n uint64 // Unread bytes in chunk
-	err error
+	cred *Credential
+	reader *bufio.Reader
+	seedSignature string
+	seedDate time.Time
+	region string
+	state chunkState
+	lastChunk bool
+	chunkSignature string
+	chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
+	n uint64 // Unread bytes in chunk
+	err error
 }
 
 // Read chunk reads the chunk token signature portion.
@@ -157,6 +270,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 	return 0, cr.err
 	}
 
+	// Calculate sha256.
+	cr.chunkSHA256Writer.Write(rbuf[:n0])
+
 	// Update the bytes read into request buffer so far.
 	n += n0
 	buf = buf[n0:]
@@ -169,6 +285,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
 	continue
 	}
 	case verifyChunk:
+	// Calculate the hashed chunk.
+	hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
+	// Calculate the chunk signature.
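+	// Each chunk's string to sign commits to the previous chunk's signature:
+	// AWS4-HMAC-SHA256-PAYLOAD \n <date> \n <scope> \n <previous signature> \n SHA256("") \n SHA256(<chunk data>).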
+ newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk) + if !compareSignatureV4(cr.chunkSignature, newSignature) { + // Chunk signature doesn't match we return signature does not match. + cr.err = errors.New("chunk signature does not match") + return 0, cr.err + } + // Newly calculated signature becomes the seed for the next chunk + // this follows the chaining. + cr.seedSignature = newSignature + cr.chunkSHA256Writer.Reset() if cr.lastChunk { cr.state = eofChunk } else { diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index d3bde66ee..f882592c1 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,9 +1,9 @@ package s3api import ( - "context" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "path/filepath" "strconv" "strings" @@ -11,10 +11,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/google/uuid" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/google/uuid" ) type InitiateMultipartUploadResult struct { @@ -22,18 +23,21 @@ type InitiateMultipartUploadResult struct { s3.CreateMultipartUploadOutput } -func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) { + + glog.V(2).Infof("createMultipartUpload input %v", input) + uploadId, _ := uuid.NewRandom() uploadIdString := uploadId.String() - if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { + if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { if entry.Extended == nil { entry.Extended = make(map[string][]byte) } entry.Extended["key"] = []byte(*input.Key) }); err != nil { glog.Errorf("NewMultipartUpload error: %v", err) - return nil, ErrInternalError + return nil, s3err.ErrInternalError } output = &InitiateMultipartUploadResult{ @@ -52,14 +56,16 @@ type CompleteMultipartUploadResult struct { s3.CompleteMultipartUploadOutput } -func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) { + + glog.V(2).Infof("completeMultipartUpload input %v", input) uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId - entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0) - if err != nil { - glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrNoSuchUpload + entries, _, err := s3a.list(uploadDirectory, "", "", false, 0) + if err != nil || len(entries) == 0 { + glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries)) + return nil, s3err.ErrNoSuchUpload } var finalParts []*filer_pb.FileChunk @@ -69,11 +75,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { for _, 
chunk := range entry.Chunks { p := &filer_pb.FileChunk{ - FileId: chunk.GetFileIdString(), - Offset: offset, - Size: chunk.Size, - Mtime: chunk.Mtime, - ETag: chunk.ETag, + FileId: chunk.GetFileIdString(), + Offset: offset, + Size: chunk.Size, + Mtime: chunk.Mtime, + CipherKey: chunk.CipherKey, + ETag: chunk.ETag, } finalParts = append(finalParts, p) offset += int64(chunk.Size) @@ -96,78 +103,103 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C dirName = dirName[:len(dirName)-1] } - err = s3a.mkFile(ctx, dirName, entryName, finalParts) + err = s3a.mkFile(dirName, entryName, finalParts) if err != nil { glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) - return nil, ErrInternalError + return nil, s3err.ErrInternalError } output = &CompleteMultipartUploadResult{ CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)), Bucket: input.Bucket, - ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""), + ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""), Key: objectKey(input.Key), }, } - if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } return } -func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { +func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) { + + glog.V(2).Infof("abortMultipartUpload input %v", input) - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) + exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrNoSuchUpload + return nil, s3err.ErrNoSuchUpload } if exists { - err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrInternalError + return nil, s3err.ErrInternalError } - return &s3.AbortMultipartUploadOutput{}, ErrNone + return &s3.AbortMultipartUploadOutput{}, s3err.ErrNone } type ListMultipartUploadsResult struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"` - s3.ListMultipartUploadsOutput + + // copied from s3.ListMultipartUploadsOutput, the Uploads is not converting to <Upload></Upload> + Bucket *string `type:"string"` + Delimiter *string `type:"string"` + EncodingType *string `type:"string" enum:"EncodingType"` + IsTruncated *bool `type:"boolean"` + KeyMarker *string `type:"string"` + MaxUploads *int64 `type:"integer"` + NextKeyMarker *string `type:"string"` + NextUploadIdMarker *string `type:"string"` + Prefix *string `type:"string"` + UploadIdMarker *string `type:"string"` + Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } -func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output 
*ListMultipartUploadsResult, code ErrorCode) { +func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + + glog.V(2).Infof("listMultipartUploads input %v", input) output = &ListMultipartUploadsResult{ - ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ - Bucket: input.Bucket, - Delimiter: input.Delimiter, - EncodingType: input.EncodingType, - KeyMarker: input.KeyMarker, - MaxUploads: input.MaxUploads, - Prefix: input.Prefix, - }, + Bucket: input.Bucket, + Delimiter: input.Delimiter, + EncodingType: input.EncodingType, + KeyMarker: input.KeyMarker, + MaxUploads: input.MaxUploads, + Prefix: input.Prefix, } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, uint32(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return } + output.IsTruncated = aws.Bool(!isLast) for _, entry := range entries { if entry.Extended != nil { - key := entry.Extended["key"] - output.Uploads = append(output.Uploads, &s3.MultipartUpload{ - Key: objectKey(aws.String(string(key))), + key := string(entry.Extended["key"]) + if *input.KeyMarker != "" && *input.KeyMarker != key { + continue + } + if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) { + continue + } + output.Upload = append(output.Upload, &s3.MultipartUpload{ + Key: objectKey(aws.String(key)), UploadId: aws.String(entry.Name), }) + if !isLast { + output.NextUploadIdMarker = aws.String(entry.Name) + } } } @@ -176,27 +208,41 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List type ListPartsResult struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"` - s3.ListPartsOutput + + // copied from s3.ListPartsOutput, the Parts is not converting to <Part></Part> + Bucket *string `type:"string"` + IsTruncated *bool `type:"boolean"` + Key *string `min:"1" type:"string"` + MaxParts *int64 `type:"integer"` + NextPartNumberMarker *int64 `type:"integer"` + PartNumberMarker *int64 `type:"integer"` + Part []*s3.Part `locationName:"Part" type:"list" flattened:"true"` + StorageClass *string `type:"string" enum:"StorageClass"` + UploadId *string `type:"string"` } -func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { +func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + + glog.V(2).Infof("listObjectParts input %v", input) + output = &ListPartsResult{ - ListPartsOutput: s3.ListPartsOutput{ - Bucket: input.Bucket, - Key: objectKey(input.Key), - UploadId: input.UploadId, - MaxParts: input.MaxParts, // the maximum number of parts to return. - PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive - }, + Bucket: input.Bucket, + Key: objectKey(input.Key), + UploadId: input.UploadId, + MaxParts: input.MaxParts, // the maximum number of parts to return. 
+ PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive + StorageClass: aws.String("STANDARD"), } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, - "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) + entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrNoSuchUpload + return nil, s3err.ErrNoSuchUpload } + output.IsTruncated = aws.Bool(!isLast) + for _, entry := range entries { if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { partNumberString := entry.Name[:len(entry.Name)-len(".part")] @@ -205,12 +251,15 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err) continue } - output.Parts = append(output.Parts, &s3.Part{ + output.Part = append(output.Part, &s3.Part{ PartNumber: aws.Int64(int64(partNumber)), - LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)), - Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), - ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""), + LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()), + Size: aws.Int64(int64(filer.FileSize(entry))), + ETag: aws.String("\"" + filer.ETag(entry) + "\""), }) + if !isLast { + output.NextPartNumberMarker = aws.Int64(int64(partNumber)) + } } } diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go index 835665dd6..f2568b6bc 100644 --- a/weed/s3api/filer_multipart_test.go +++ b/weed/s3api/filer_multipart_test.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "testing" + "time" ) func TestInitiateMultipartUploadResult(t *testing.T) { @@ -24,3 +25,25 @@ func TestInitiateMultipartUploadResult(t *testing.T) { } } + +func TestListPartsResult(t *testing.T) { + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Part><ETag>"12345678"</ETag><LastModified>1970-01-01T00:00:00Z</LastModified><PartNumber>1</PartNumber><Size>123</Size></Part></ListPartsResult>` + response := &ListPartsResult{ + Part: []*s3.Part{ + { + PartNumber: aws.Int64(int64(1)), + LastModified: aws.Time(time.Unix(0, 0).UTC()), + Size: aws.Int64(int64(123)), + ETag: aws.String("\"12345678\""), + }, + }, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } + +} diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index ed9612d35..1803332a3 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,164 +3,91 @@ package s3api import ( "context" "fmt" - "io" - "os" - "strings" - "time" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" ) -func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: dirName, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - 
Crtime: time.Now().Unix(), - FileMode: uint32(0777 | os.ModeDir), - Uid: OS_UID, - Gid: OS_GID, - }, - } +func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - if fn != nil { - fn(entry) - } + return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn) - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } +} - glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("mkdir %v: %v", request, err) - return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) - } +func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return nil - }) -} + return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks) -func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: fileName, - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0770), - Uid: OS_UID, - Gid: OS_GID, - }, - Chunks: chunks, - } +} - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } +func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, isLast bool, err error) { - glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create file %v:%v", request, err) - return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) + err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error { + entries = append(entries, entry) + if isLastEntry { + isLast = true } - return nil - }) -} + }, startFrom, inclusive, limit) -func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if len(entries) == 0 { + isLast = true + } - request := &filer_pb.ListEntriesRequest{ - Directory: parentDirectoryPath, - Prefix: prefix, - StartFromFileName: startFrom, - InclusiveStartFrom: inclusive, - Limit: uint32(limit), - } + return - glog.V(4).Infof("read directory: %v", request) - stream, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("read directory %v: %v", request, err) - return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) - } +} - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - return recvErr - } - } +func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error { - entries = append(entries, resp.Entry) + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err != nil { + return err } return nil }) - return - } -func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) 
error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - IsDeleteData: isDeleteData, - IsRecursive: isRecursive, - } +func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error { + request := &filer_pb.DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + } - glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) - if _, err := client.DeleteEntry(ctx, request); err != nil { - glog.V(0).Infof("delete entry %v: %v", request, err) - return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) + if resp, err := client.DeleteEntry(context.Background(), request); err != nil { + glog.V(0).Infof("delete entry %v: %v", request, err) + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + } else { + if resp.Error != "" { + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error) } - - return nil - }) - + } + return nil } -func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory) - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - } +} - glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(0).Infof("exists entry %v: %v", request, err) - return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) - } +func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) { - exists = resp.Entry.IsDirectory == isDirectory + return filer_pb.Touch(s3a, parentDirectoryPath, entryName, entry) - return nil - }) +} - return +func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) { + fullPath := util.NewFullPath(parentDirectoryPath, entryName) + return filer_pb.GetEntry(s3a, fullPath) } func objectKey(key *string) *string { diff --git a/weed/s3api/filer_util_tags.go b/weed/s3api/filer_util_tags.go new file mode 100644 index 000000000..75d3b37d0 --- /dev/null +++ b/weed/s3api/filer_util_tags.go @@ -0,0 +1,105 @@ +package s3api + +import ( + "strings" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" +) + +const ( + S3TAG_PREFIX = xhttp.AmzObjectTagging + "-" +) + +func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) { + + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + }) + if err != nil { + return err + } + tags = make(map[string]string) + for k, v := range resp.Entry.Extended { + if strings.HasPrefix(k, S3TAG_PREFIX) { + tags[k[len(S3TAG_PREFIX):]] = string(v) + } + } + return nil + }) + 
return +} + +func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, tags map[string]string) (err error) { + + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + }) + if err != nil { + return err + } + + for k, _ := range resp.Entry.Extended { + if strings.HasPrefix(k, S3TAG_PREFIX) { + delete(resp.Entry.Extended, k) + } + } + + if resp.Entry.Extended == nil { + resp.Entry.Extended = make(map[string][]byte) + } + for k, v := range tags { + resp.Entry.Extended[S3TAG_PREFIX+k] = []byte(v) + } + + return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: resp.Entry, + IsFromOtherCluster: false, + Signatures: nil, + }) + + }) + +} + +func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (err error) { + + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + }) + if err != nil { + return err + } + + hasDeletion := false + for k, _ := range resp.Entry.Extended { + if strings.HasPrefix(k, S3TAG_PREFIX) { + delete(resp.Entry.Extended, k) + hasDeletion = true + } + } + + if !hasDeletion { + return nil + } + + return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: resp.Entry, + IsFromOtherCluster: false, + Signatures: nil, + }) + + }) + +} diff --git a/weed/s3api/http/header.go b/weed/s3api/http/header.go new file mode 100644 index 000000000..6614b0af0 --- /dev/null +++ b/weed/s3api/http/header.go @@ -0,0 +1,36 @@ +/* + * MinIO Cloud Storage, (C) 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package http + +// Standard S3 HTTP request constants +const ( + // S3 storage class + AmzStorageClass = "x-amz-storage-class" + + // S3 user-defined metadata + AmzUserMetaPrefix = "X-Amz-Meta-" + + // S3 object tagging + AmzObjectTagging = "X-Amz-Tagging" + AmzTagCount = "x-amz-tagging-count" +) + +// Non-Standard S3 HTTP request constants +const ( + AmzIdentityId = "s3-identity-id" + AmzIsAdmin = "s3-is-admin" // only set to http request header as a context +) diff --git a/weed/s3api/policy/post-policy.go b/weed/s3api/policy/post-policy.go new file mode 100644 index 000000000..5ef8d397d --- /dev/null +++ b/weed/s3api/policy/post-policy.go @@ -0,0 +1,321 @@ +package policy + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net/http" + "strings" + "time" +) + +// expirationDateFormat date format for expiration key in json policy. +const expirationDateFormat = "2006-01-02T15:04:05.999Z" + +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +// +type policyCondition struct { + matchType string + condition string + value string +} + +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. +type PostPolicy struct { + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content. + contentLengthRange struct { + min int64 + max int64 + } + + // Post form data. + formData map[string]string +} + +// NewPostPolicy - Instantiate new post policy. +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.conditions = make([]policyCondition, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires - Sets expiration time for the new policy. +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return errInvalidArgument("No expiry time set.") + } + p.expiration = t + return nil +} + +// SetKey - Sets an object name for the policy based upload. +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errInvalidArgument("Object name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith - Sets an object name that a policy based upload +// can start with. +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { + return errInvalidArgument("Object prefix is empty.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket - Sets bucket at which objects will be uploaded to.
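+// The bucket name is also recorded in the POST form data.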
+func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { + return errInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetCondition - Sets condition for credentials, date and algorithm +func (p *PostPolicy) SetCondition(matchType, condition, value string) error { + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("No value specified for condition") + } + + policyCond := policyCondition{ + matchType: matchType, + condition: "$" + condition, + value: value, + } + if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[condition] = value + return nil + } + return errInvalidArgument("Invalid condition in policy") +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" || contentType == "" { + return errInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. +func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { + return errInvalidArgument("Minimum limit is larger than maximum limit.") + } + if min < 0 { + return errInvalidArgument("Minimum limit cannot be negative.") + } + if max < 0 { + return errInvalidArgument("Maximum limit cannot be negative.") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { + if strings.TrimSpace(redirect) == "" || redirect == "" { + return errInvalidArgument("Redirect is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_redirect", + value: redirect, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_redirect"] = redirect + return nil +} + +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" || status == "" { + return errInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. 
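+// The key is written into the form data under the x-amz-meta- prefix.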
+func (p *PostPolicy) SetUserMetadata(key string, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key string, value string) error { + if key == "" { + return errInvalidArgument("Key is empty") + } + if value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return errInvalidArgument("Policy fields are empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// String function for printing policy in json formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides Marshaled JSON in bytes. +func (p PostPolicy) marshalJSON() []byte { + expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` + var conditionsStr string + conditions := []string{} + for _, po := range p.conditions { + conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) + } + if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { + conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", + p.contentLengthRange.min, p.contentLengthRange.max)) + } + if len(conditions) > 0 { + conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" + } + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionsStr + retStr = retStr + "}" + return []byte(retStr) +} + +// base64 - Produces base64 of PostPolicy's Marshaled json. +func (p PostPolicy) base64() string { + return base64.StdEncoding.EncodeToString(p.marshalJSON()) +} + +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { + return s3err.RESTErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} diff --git a/weed/s3api/policy/post-policy_test.go b/weed/s3api/policy/post-policy_test.go new file mode 100644 index 000000000..ce241b723 --- /dev/null +++ b/weed/s3api/policy/post-policy_test.go @@ -0,0 +1,378 @@ +package policy + +/* + * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "mime/multipart" + "net/http" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" +) + +const ( + iso8601DateFormat = "20060102T150405Z" + iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision. +) + +func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey string, expiration time.Time) []byte { + t := time.Now().UTC() + // Add the expiration date. + expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat)) + // Add the bucket condition, only accept buckets equal to the one passed. + bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName) + // Add the key condition, only accept keys equal to the one passed. + keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey) + // Add content length condition, only accept content sizes of a given length. + contentLengthCondStr := `["content-length-range", 1024, 1048576]` + // Add the algorithm condition, only accept AWS SignV4 Sha256. + algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]` + // Add the date condition, only accept the current date. + dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat)) + // Add the credential string, only accept the credential passed. + credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential) + // Add the meta-uuid string, set to 1234 + uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234") + + // Combine all conditions into one string. + conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr, + keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionStr + retStr = retStr + "}" + + return []byte(retStr) +} + +// newPostPolicyBytesV4 - creates a bare bones postpolicy string with key and bucket matches. +func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration time.Time) []byte { + t := time.Now().UTC() + // Add the expiration date. + expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat)) + // Add the bucket condition, only accept buckets equal to the one passed. + bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName) + // Add the key condition, only accept keys equal to the one passed. + keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey) + // Add the algorithm condition, only accept AWS SignV4 Sha256. + algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]` + // Add the date condition, only accept the current date. + dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat)) + // Add the credential string, only accept the credential passed.
+ credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential) + // Add the meta-uuid string, set to 1234 + uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234") + + // Combine all conditions into one string. + conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionStr + retStr = retStr + "}" + + return []byte(retStr) +} + +// newPostPolicyBytesV2 - creates a bare bones postpolicy string with key and bucket matches. +func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []byte { + // Add the expiration date. + expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat)) + // Add the bucket condition, only accept buckets equal to the one passed. + bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName) + // Add the key condition, only accept keys equal to the one passed. + keyConditionStr := fmt.Sprintf(`["starts-with", "$key", "%s/upload.txt"]`, objectKey) + + // Combine all conditions into one string. + conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr) + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionStr + retStr = retStr + "}" + + return []byte(retStr) +} + +// Wrapper for calling TestPostPolicyBucketHandler tests for both Erasure multiple disks and single node setup. + +// testPostPolicyBucketHandler - Tests validate post policy handler uploading objects. + +// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both Erasure multiple disks and single node setup. + +// testPostPolicyBucketHandlerRedirect tests POST Object when success_action_redirect is specified. + +// postPresignSignatureV4 - presigned signature for PostPolicy requests. +func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + // Get signing key. + signingkey := getSigningKey(secretAccessKey, t, location) + // Calculate signature. + signature := getSignature(signingkey, policyBase64) + return signature +} + +// copied from auth_signature_v4.go to break import loop +// sumHMAC calculates the hmac between two input byte arrays. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// copied from auth_signature_v4.go to break import loop +// getSigningKey derives the hmac seed used to calculate the final signature. +func getSigningKey(secretKey string, t time.Time, region string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102"))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// copied from auth_signature_v4.go to break import loop +// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// copied from auth_signature_v4.go to break import loop +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secretKey string) (*http.Request, error) { + // Expire the request five minutes from now. + expirationTime := time.Now().UTC().Add(time.Minute * 5) + // Create a new post policy. + policy := newPostPolicyBytesV2(bucketName, objectName, expirationTime) + // Only need the encoding. + encodedPolicy := base64.StdEncoding.EncodeToString(policy) + + // Presign with a V2 signature based on the policy. + signature := calculateSignatureV2(encodedPolicy, secretKey) + + formData := map[string]string{ + "AWSAccessKeyId": accessKey, + "bucket": bucketName, + "key": objectName + "/${filename}", + "policy": encodedPolicy, + "signature": signature, + } + + // Create the multipart form. + var buf bytes.Buffer + w := multipart.NewWriter(&buf) + + // Set the normal formData + for k, v := range formData { + w.WriteField(k, v) + } + // Set the File formData + writer, err := w.CreateFormFile("file", "upload.txt") + if err != nil { + // return nil, err + return nil, err + } + writer.Write([]byte("hello world")) + // Close before creating the new request. + w.Close() + + // Set the body equal to the created policy. + reader := bytes.NewReader(buf.Bytes()) + + req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader) + if err != nil { + return nil, err + } + + // Set form content-type. + req.Header.Set("Content-Type", w.FormDataContentType()) + return req, nil +} + +func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName string, contentLengthRange bool) []byte { + // Expire the request five minutes from now. + expirationTime := t.Add(time.Minute * 5) + + credStr := getCredentialString(accessKey, region, t) + // Create a new post policy. + policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime) + if contentLengthRange { + policy = newPostPolicyBytesV4WithContentRange(credStr, bucketName, objectName, expirationTime) + } + return policy +} + +func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string, + t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) { + // Get the user credential. + credStr := getCredentialString(accessKey, region, t) + + // Only need the encoding. + encodedPolicy := base64.StdEncoding.EncodeToString(policy) + + if corruptedB64 { + encodedPolicy = "%!~&" + encodedPolicy + } + + // Presign with V4 signature based on the policy. + signature := postPresignSignatureV4(encodedPolicy, t, secretKey, region) + + formData := map[string]string{ + "bucket": bucketName, + "key": objectName + "/${filename}", + "x-amz-credential": credStr, + "policy": encodedPolicy, + "x-amz-signature": signature, + "x-amz-date": t.Format(iso8601DateFormat), + "x-amz-algorithm": "AWS4-HMAC-SHA256", + "x-amz-meta-uuid": "1234", + "Content-Encoding": "gzip", + } + + // Add form data + for k, v := range addFormData { + formData[k] = v + } + + // Create the multipart form.
+ var buf bytes.Buffer + w := multipart.NewWriter(&buf) + + // Set the normal formData + for k, v := range formData { + w.WriteField(k, v) + } + // Set the File formData, but skip it if we want to send an incomplete multipart request + if !corruptedMultipart { + writer, err := w.CreateFormFile("file", "upload.txt") + if err != nil { + // return nil, err + return nil, err + } + writer.Write(objData) + // Close before creating the new request. + w.Close() + } + + // Set the body equal to the created policy. + reader := bytes.NewReader(buf.Bytes()) + + req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader) + if err != nil { + return nil, err + } + + // Set form content-type. + req.Header.Set("Content-Type", w.FormDataContentType()) + return req, nil +} + +func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) { + t := time.Now().UTC() + region := "us-east-1" + policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, true) + return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false) +} + +func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) { + t := time.Now().UTC() + region := "us-east-1" + policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, false) + return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false) +} + +// construct URL for http requests for bucket operations. +func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string { + urlStr := endPoint + "/" + if bucketName != "" { + urlStr = urlStr + bucketName + "/" + } + if objectName != "" { + urlStr = urlStr + EncodePath(objectName) + } + if len(queryValues) > 0 { + urlStr = urlStr + "?" + queryValues.Encode() + } + return urlStr +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encodes a string from its UTF-8 byte representation to HTML hex escape sequences. +// +// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8: +// non-English characters cannot be parsed due to the way url.Encode() is written. +// +// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// getCredentialString generates a credential string.
+func getCredentialString(accessKeyID, location string, t time.Time) string { + return accessKeyID + "/" + getScope(t, location) +} + +// getScope generates a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format("20060102"), + region, + string("s3"), + "aws4_request", + }, "/") + return scope +} diff --git a/weed/s3api/policy/postpolicyform.go b/weed/s3api/policy/postpolicyform.go new file mode 100644 index 000000000..3a6f3a882 --- /dev/null +++ b/weed/s3api/policy/postpolicyform.go @@ -0,0 +1,276 @@ +package policy + +/* + * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + "time" +) + +// startsWithConds - map which indicates if a given condition supports the starts-with policy operator +var startsWithConds = map[string]bool{ + "$acl": true, + "$bucket": false, + "$cache-control": true, + "$content-type": true, + "$content-disposition": true, + "$content-encoding": true, + "$expires": true, + "$key": true, + "$success_action_redirect": true, + "$redirect": true, + "$success_action_status": false, + "$x-amz-algorithm": false, + "$x-amz-credential": false, + "$x-amz-date": false, +} + +// Add policy conditionals. +const ( + policyCondEqual = "eq" + policyCondStartsWith = "starts-with" + policyCondContentLength = "content-length-range" +) + +// toString - Safely convert interface to string without causing panic. +func toString(val interface{}) string { + switch v := val.(type) { + case string: + return v + default: + return "" + } +} + +// toLowerString - safely convert interface to lower string +func toLowerString(val interface{}) string { + return strings.ToLower(toString(val)) +} + +// toInteger - Safely convert interface to integer without causing panic. +func toInteger(val interface{}) (int64, error) { + switch v := val.(type) { + case float64: + return int64(v), nil + case int64: + return v, nil + case int: + return int64(v), nil + case string: + i, err := strconv.Atoi(v) + return int64(i), err + default: + return 0, errors.New("Invalid number format") + } +} + +// isString - Safely check if val is of type string without causing panic. +func isString(val interface{}) bool { + _, ok := val.(string) + return ok +} + +// ContentLengthRange - policy content-length-range field. +type contentLengthRange struct { + Min int64 + Max int64 + Valid bool // If content-length-range was part of policy +} + +// PostPolicyForm provides strict static type conversion and validation for Amazon S3's POST policy JSON string. +type PostPolicyForm struct { + Expiration time.Time // Expiration date and time of the POST policy. + Conditions struct { // Conditional policy structure.
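+ // Policies holds one parsed (Operator, Key, Value) triple per condition; the content-length-range condition is tracked separately below.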
+ Policies []struct { + Operator string + Key string + Value string + } + ContentLengthRange contentLengthRange + } +} + +// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure. +func ParsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) { + // Convert the policy into interfaces and + // perform strict type conversion using reflection. + var rawPolicy struct { + Expiration string `json:"expiration"` + Conditions []interface{} `json:"conditions"` + } + + err := json.Unmarshal([]byte(policy), &rawPolicy) + if err != nil { + return ppf, err + } + + parsedPolicy := PostPolicyForm{} + + // Parse expiry time. + parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration) + if err != nil { + return ppf, err + } + + // Parse conditions. + for _, val := range rawPolicy.Conditions { + switch condt := val.(type) { + case map[string]interface{}: // Handle key:value map types. + for k, v := range condt { + if !isString(v) { // Pre-check value type. + // All values must be of type string. + return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt) + } + // {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ] + // In this case we will just collapse this into "eq" for all use cases. + parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct { + Operator string + Key string + Value string + }{ + policyCondEqual, "$" + strings.ToLower(k), toString(v), + }) + } + case []interface{}: // Handle array types. + if len(condt) != 3 { // Return an error unless we have exactly three elements. + return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String()) + } + switch toLowerString(condt[0]) { + case policyCondEqual, policyCondStartsWith: + for _, v := range condt { // Pre-check all values for type. + if !isString(v) { + // All values must be of type string. + return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt) + } + } + operator, matchType, value := toLowerString(condt[0]), toLowerString(condt[1]), toString(condt[2]) + if !strings.HasPrefix(matchType, "$") { + return parsedPolicy, fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", operator, matchType, value) + } + parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct { + Operator string + Key string + Value string + }{ + operator, matchType, value, + }) + case policyCondContentLength: + min, err := toInteger(condt[1]) + if err != nil { + return parsedPolicy, err + } + + max, err := toInteger(condt[2]) + if err != nil { + return parsedPolicy, err + } + + parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{ + Min: min, + Max: max, + Valid: true, + } + default: + // Condition should be valid.
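+ // Any operator other than eq, starts-with, or content-length-range is rejected here.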
+ return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", + reflect.TypeOf(condt).String(), condt) + } + default: + return parsedPolicy, fmt.Errorf("Unknown field %s of type %s found in POST policy form", + condt, reflect.TypeOf(condt).String()) + } + } + return parsedPolicy, nil +} + +// checkPolicyCond returns a boolean to indicate if a condition is satisfied according +// to the passed operator +func checkPolicyCond(op string, input1, input2 string) bool { + switch op { + case policyCondEqual: + return input1 == input2 + case policyCondStartsWith: + return strings.HasPrefix(input1, input2) + } + return false +} + +// CheckPostPolicy - apply policy conditions and validate input values. +// (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html) +func CheckPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) error { + // Check if policy document expiry date is still not reached + if !postPolicyForm.Expiration.After(time.Now().UTC()) { + return fmt.Errorf("Invalid according to Policy: Policy expired") + } + // map to store the metadata + metaMap := make(map[string]string) + for _, policy := range postPolicyForm.Conditions.Policies { + if strings.HasPrefix(policy.Key, "$x-amz-meta-") { + formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$")) + metaMap[formCanonicalName] = policy.Value + } + } + // Check if any extra metadata field is passed as input + for key := range formValues { + if strings.HasPrefix(key, "X-Amz-Meta-") { + if _, ok := metaMap[key]; !ok { + return fmt.Errorf("Invalid according to Policy: Extra input fields: %s", key) + } + } + } + + // Flag to indicate if all policy conditions are satisfied + var condPassed bool + + // Iterate over policy conditions and check them against received form fields + for _, policy := range postPolicyForm.Conditions.Policies { + // Form field names are in canonical format; convert condition names + // to canonical for simplification purposes, so `$key` will become `Key` + formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$")) + // Operator for the current policy condition + op := policy.Operator + // If the current policy condition is known + if startsWithSupported, condFound := startsWithConds[policy.Key]; condFound { + // Check if the current condition supports starts-with operator + if op == policyCondStartsWith && !startsWithSupported { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed") + } + // Check if current policy condition is satisfied + condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) + if !condPassed { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed") + } + } else { + // This covers all conditions X-Amz-Meta-* and X-Amz-* + if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") { + // Check if policy condition is satisfied + condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) + if !condPassed { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value) + } + } + } + } + + return nil +} diff --git a/weed/s3api/policy/postpolicyform_test.go b/weed/s3api/policy/postpolicyform_test.go new file mode 100644 index 000000000..1a9d78b0e --- /dev/null +++ b/weed/s3api/policy/postpolicyform_test.go @@ -0,0 +1,106 @@ +package policy + +/* + * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/base64" + "fmt" + "net/http" + "testing" + "time" +) + +// Test Post Policy parsing and checking conditions +func TestPostPolicyForm(t *testing.T) { + pp := NewPostPolicy() + pp.SetBucket("testbucket") + pp.SetContentType("image/jpeg") + pp.SetUserMetadata("uuid", "14365123651274") + pp.SetKeyStartsWith("user/user1/filename") + pp.SetContentLengthRange(1048579, 10485760) + pp.SetSuccessStatusAction("201") + + type testCase struct { + Bucket string + Key string + XAmzDate string + XAmzAlgorithm string + XAmzCredential string + XAmzMetaUUID string + ContentType string + SuccessActionStatus string + Policy string + Expired bool + expectedErr error + } + + testCases := []testCase{ + // Everything is fine with this test + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: nil}, + // Expired policy document + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Expired: true, expectedErr: fmt.Errorf("Invalid according to Policy: Policy expired")}, + // Different AMZ date + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "2017T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Key which doesn't start with user/user1/filename + {Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect bucket name. 
+ {Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect key name + {Bucket: "testbucket", Key: "incorrect", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect date + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect ContentType + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect Metadata + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "151274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed: [eq, $x-amz-meta-uuid, 14365123651274]")}, + } + // Validate all the test cases. + for i, tt := range testCases { + formValues := make(http.Header) + formValues.Set("Bucket", tt.Bucket) + formValues.Set("Key", tt.Key) + formValues.Set("Content-Type", tt.ContentType) + formValues.Set("X-Amz-Date", tt.XAmzDate) + formValues.Set("X-Amz-Meta-Uuid", tt.XAmzMetaUUID) + formValues.Set("X-Amz-Algorithm", tt.XAmzAlgorithm) + formValues.Set("X-Amz-Credential", tt.XAmzCredential) + if tt.Expired { + // Expired already. + pp.SetExpires(time.Now().UTC().AddDate(0, 0, -10)) + } else { + // Expires in 10 days. 
+ pp.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) + } + + formValues.Set("Policy", base64.StdEncoding.EncodeToString([]byte(pp.String()))) + formValues.Set("Success_action_status", tt.SuccessActionStatus) + policyBytes, err := base64.StdEncoding.DecodeString(base64.StdEncoding.EncodeToString([]byte(pp.String()))) + if err != nil { + t.Fatal(err) + } + + postPolicyForm, err := ParsePostPolicyForm(string(policyBytes)) + if err != nil { + t.Fatal(err) + } + + err = CheckPostPolicy(formValues, postPolicyForm) + if err != nil && tt.expectedErr != nil && err.Error() != tt.expectedErr.Error() { + t.Fatalf("Test %d:, Expected %s, got %s", i+1, tt.expectedErr.Error(), err.Error()) + } + } +} diff --git a/weed/s3api/s3_constants/s3_actions.go b/weed/s3api/s3_constants/s3_actions.go new file mode 100644 index 000000000..4e484ac98 --- /dev/null +++ b/weed/s3api/s3_constants/s3_actions.go @@ -0,0 +1,9 @@ +package s3_constants + +const ( + ACTION_READ = "Read" + ACTION_WRITE = "Write" + ACTION_ADMIN = "Admin" + ACTION_TAGGING = "Tagging" + ACTION_LIST = "List" +) diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index b680fe1e1..bf5cf5fab 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -9,6 +9,8 @@ import ( const ( signV4Algorithm = "AWS4-HMAC-SHA256" signV2Algorithm = "AWS" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" ) // Verify if request has JWT. @@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool { // Verify if request has AWS Signature Version '2'. func isRequestSignatureV2(r *http.Request) bool { - return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)) + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) } // Verify if request has AWS PreSign Version '4'. 
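Two of the response types in this patch (ListMultipartUploadsResult, ListPartsResult) copy fields out of the AWS SDK output structs instead of embedding them. The reason is Go's encoding/xml behavior: a slice field marshals as one element per item, named after the field, so the SDK's Uploads/Parts fields would serialize as <Uploads>/<Parts> tags rather than the <Upload>/<Part> tags S3 clients expect. A small self-contained sketch of that behavior (the toy type below is invented for illustration and is not part of the patch):

package main

import (
    "encoding/xml"
    "fmt"
)

// listPartsResult mirrors the shape of the patched ListPartsResult,
// reduced to a single field. Naming the slice field "Part" makes each
// element marshal as its own <Part> tag.
type listPartsResult struct {
    XMLName xml.Name `xml:"ListPartsResult"`
    Part    []int
}

func main() {
    out, _ := xml.Marshal(listPartsResult{Part: []int{1, 2}})
    fmt.Println(string(out))
    // prints <ListPartsResult><Part>1</Part><Part>2</Part></ListPartsResult>
}

That is why the patch copies the SDK fields and renames the slices to Upload/Part rather than embedding s3.ListMultipartUploadsOutput and s3.ListPartsOutput; the TestListPartsResult test above pins down the expected <Part> serialization.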
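For orientation, the call sequence the postpolicyform tests exercise is: decode the base64 "Policy" form field, parse it with ParsePostPolicyForm, then validate the submitted form fields with CheckPostPolicy. A minimal sketch of that sequence, assuming the caller already holds the form values as an http.Header; the package and helper names below are invented for illustration:

package policyusage

import (
    "encoding/base64"
    "fmt"
    "net/http"

    "github.com/chrislusf/seaweedfs/weed/s3api/policy"
)

// checkPostForm validates a browser POST upload form against its signed policy.
func checkPostForm(formValues http.Header) error {
    // The policy document travels base64-encoded in the "Policy" form field,
    // as in the tests above.
    policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
    if err != nil {
        return fmt.Errorf("decode policy: %v", err)
    }
    // Strictly type-check the JSON policy into a PostPolicyForm.
    postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes))
    if err != nil {
        return fmt.Errorf("parse policy: %v", err)
    }
    // Enforce expiration and the eq/starts-with conditions, and reject
    // unexpected x-amz-meta-* form fields.
    return policy.CheckPostPolicy(formValues, postPolicyForm)
}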
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 492d94616..48e8cb047 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -4,21 +4,19 @@ import ( "context" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "math" "net/http" - "os" "time" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" -) - -var ( - OS_UID = uint32(os.Getuid()) - OS_GID = uint32(os.Getgid()) ) type ListAllMyBucketsResult struct { @@ -29,29 +27,44 @@ type ListAllMyBucketsResult struct { func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { + var identity *Identity + var s3Err s3err.ErrorCode + if s3a.iam.isEnabled() { + identity, s3Err = s3a.iam.authUser(r) + if s3Err != s3err.ErrNone { + writeErrorResponse(w, s3Err, r.URL) + return + } + } + var response ListAllMyBucketsResult - entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32) + entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } + identityId := r.Header.Get(xhttp.AmzIdentityId) + var buckets []*s3.Bucket for _, entry := range entries { if entry.IsDirectory { + if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, entry.Name) { + continue + } buckets = append(buckets, &s3.Bucket{ Name: aws.String(entry.Name), - CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0)), + CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()), }) } } response = ListAllMyBucketsResult{ Owner: &s3.Owner{ - ID: aws.String(""), - DisplayName: aws.String(""), + ID: aws.String(identityId), + DisplayName: aws.String(identityId), }, Buckets: buckets, } @@ -61,12 +74,51 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] + bucket, _ := getBucketAndObject(r) + + // avoid duplicated buckets + errCode := s3err.ErrNone + if err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if resp, err := client.CollectionList(context.Background(), &filer_pb.CollectionListRequest{ + IncludeEcVolumes: true, + IncludeNormalVolumes: true, + }); err != nil { + glog.Errorf("list collection: %v", err) + return fmt.Errorf("list collections: %v", err) + } else { + for _, c := range resp.Collections { + if bucket == c.Name { + errCode = s3err.ErrBucketAlreadyExists + break + } + } + } + return nil + }); err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist { + errCode = s3err.ErrBucketAlreadyExists + } + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + fn := func(entry *filer_pb.Entry) { + if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" { + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + entry.Extended[xhttp.AmzIdentityId] = []byte(identityId) + } + } // create the folder for bucket, but lazily create actual 
collection - if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil { + glog.Errorf("PutBucketHandler mkdir: %v", err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } @@ -75,11 +127,14 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] + bucket, _ := getBucketAndObject(r) - ctx := context.Background() - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { + writeErrorResponse(w, err, r.URL) + return + } + + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -87,17 +142,17 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque } glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) - if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil { + if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil { return fmt.Errorf("delete collection %s: %v", bucket, err) } return nil }) - err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(s3a.option.BucketsPath, bucket, false, true) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } @@ -106,30 +161,42 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - ctx := context.Background() + bucket, _ := getBucketAndObject(r) - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { + writeErrorResponse(w, err, r.URL) + return + } - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: s3a.option.BucketsPath, - Name: bucket, - } + writeSuccessResponseEmpty(w) +} - glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(ctx, request); err != nil { - return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) - } +func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode { + entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) + if entry == nil || err == filer_pb.ErrNotFound { + return s3err.ErrNoSuchBucket + } - return nil - }) + if !s3a.hasAccess(r, entry) { + return s3err.ErrAccessDenied + } + return s3err.ErrNone +} - if err != nil { - writeErrorResponse(w, ErrNoSuchBucket, r.URL) - return +func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool { + isAdmin := r.Header.Get(xhttp.AmzIsAdmin) != "" + if isAdmin { + return true + } + if entry.Extended == nil { + return true } - writeSuccessResponseEmpty(w) + identityId := r.Header.Get(xhttp.AmzIdentityId) + if id, ok := entry.Extended[xhttp.AmzIdentityId]; ok { + if identityId != string(id) { + return false + } + } + return true } diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go deleted file mode 100644 index 7ba55ed28..000000000 --- a/weed/s3api/s3api_errors.go +++ /dev/null @@ 
-1,131 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "net/http" -) - -// APIError structure -type APIError struct { - Code string - Description string - HTTPStatusCode int -} - -// RESTErrorResponse - error response format -type RESTErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string `xml:"Code" json:"Code"` - Message string `xml:"Message" json:"Message"` - Resource string `xml:"Resource" json:"Resource"` - RequestID string `xml:"RequestId" json:"RequestId"` -} - -// ErrorCode type of error status. -type ErrorCode int - -// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -const ( - ErrNone ErrorCode = iota - ErrMethodNotAllowed - ErrBucketNotEmpty - ErrBucketAlreadyExists - ErrBucketAlreadyOwnedByYou - ErrNoSuchBucket - ErrNoSuchUpload - ErrInvalidBucketName - ErrInvalidDigest - ErrInvalidMaxKeys - ErrInvalidMaxUploads - ErrInvalidMaxParts - ErrInvalidPartNumberMarker - ErrInvalidPart - ErrInternalError - ErrNotImplemented -) - -// error code to APIError structure, these fields carry respective -// descriptions for all the error responses. -var errorCodeResponse = map[ErrorCode]APIError{ - ErrMethodNotAllowed: { - Code: "MethodNotAllowed", - Description: "The specified method is not allowed against this resource.", - HTTPStatusCode: http.StatusMethodNotAllowed, - }, - ErrBucketNotEmpty: { - Code: "BucketNotEmpty", - Description: "The bucket you tried to delete is not empty", - HTTPStatusCode: http.StatusConflict, - }, - ErrBucketAlreadyExists: { - Code: "BucketAlreadyExists", - Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.", - HTTPStatusCode: http.StatusConflict, - }, - ErrBucketAlreadyOwnedByYou: { - Code: "BucketAlreadyOwnedByYou", - Description: "Your previous request to create the named bucket succeeded and you already own it.", - HTTPStatusCode: http.StatusConflict, - }, - ErrInvalidBucketName: { - Code: "InvalidBucketName", - Description: "The specified bucket is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidDigest: { - Code: "InvalidDigest", - Description: "The Content-Md5 you specified is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxUploads: { - Code: "InvalidArgument", - Description: "Argument max-uploads must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxKeys: { - Code: "InvalidArgument", - Description: "Argument maxKeys must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxParts: { - Code: "InvalidArgument", - Description: "Argument max-parts must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidPartNumberMarker: { - Code: "InvalidArgument", - Description: "Argument partNumberMarker must be an integer.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNoSuchBucket: { - Code: "NoSuchBucket", - Description: "The specified bucket does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchUpload: { - Code: "NoSuchUpload", - Description: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrInternalError: { - Code: "InternalError", - Description: "We encountered an internal error, please try again.", - HTTPStatusCode: http.StatusInternalServerError, - }, - - ErrInvalidPart: { - Code: "InvalidPart", - Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNotImplemented: { - Code: "NotImplemented", - Description: "A header you provided implies functionality that is not implemented", - HTTPStatusCode: http.StatusNotImplemented, - }, -} - -// getAPIError provides API Error for input API error code. -func getAPIError(code ErrorCode) APIError { - return errorCodeResponse[code] -} diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 127be07e3..6935c75bd 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -2,17 +2,20 @@ package s3api import ( "bytes" - "context" "encoding/base64" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" "net/url" + "strconv" "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type mimeType string @@ -37,30 +40,35 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&S3ApiServer{}) + +func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) } +func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} // If none of the http routes match respond with MethodNotAllowed func notFoundHandler(w http.ResponseWriter, r *http.Request) { glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI) - writeErrorResponse(w, ErrMethodNotAllowed, r.URL) + writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL) } -func writeErrorResponse(w http.ResponseWriter, errorCode ErrorCode, reqURL *url.URL) { - apiError := getAPIError(errorCode) +func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) { + apiError := s3err.GetAPIError(errorCode) errorResponse := getRESTErrorResponse(apiError, reqURL.Path) encodedErrorResponse := encodeResponse(errorResponse) writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML) } -func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse { - return RESTErrorResponse{ +func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse { + return s3err.RESTErrorResponse{ Code: err.Code, Message: err.Description, Resource: resource, @@ -70,13 +78,19 @@ func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse { func writeResponse(w 
http.ResponseWriter, statusCode int, response []byte, mType mimeType) { setCommonHeaders(w) + if response != nil { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + } if mType != mimeNone { w.Header().Set("Content-Type", string(mType)) } w.WriteHeader(statusCode) if response != nil { glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) - w.Write(response) + _, err := w.Write(response) + if err != nil { + glog.V(0).Infof("write err: %v", err) + } w.(http.Flusher).Flush() } } diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..84a85fd78 --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,174 @@ +package s3api + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + dstBucket, dstObject := getBucketAndObject(r) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + + if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) { + fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) + dir, name := fullPath.DirAndName() + entry, err := s3a.getEntry(dir, name) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + } + entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r)) + err = s3a.touch(dir, name, entry) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + } + writeSuccessResponseXML(w, encodeResponse(CopyObjectResult{ + ETag: fmt.Sprintf("%x", entry.Attributes.Md5), + LastModified: time.Now().UTC(), + })) + return + } + + // If source object is empty or bucket is empty, reply back invalid copy source. 
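
The srcBucket/srcObject pair validated below comes from pathToBucketAndObject, defined a little further down in this file; a standalone sketch of the same parsing:

package main

import (
	"fmt"
	"strings"
)

// Splits an unescaped X-Amz-Copy-Source value of the form
// "bucket/path/to/object" into a bucket name and a "/"-prefixed object key.
func pathToBucketAndObject(path string) (bucket, object string) {
	path = strings.TrimPrefix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], "/" + parts[1]
	}
	return parts[0], "/"
}

func main() {
	fmt.Println(pathToBucketAndObject("/srcbucket/dir/file.txt")) // srcbucket /dir/file.txt
	fmt.Println(pathToBucketAndObject("srcbucket"))               // srcbucket /
}
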
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + writeErrorResponse(w, s3err.ErrInvalidCopyDest, r.URL) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", + s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + _, _, resp, err := util.DownloadFile(srcUrl) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + defer util.CloseResponse(resp) + + glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body) + + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now().UTC(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + dstBucket, _ := getBucketAndObject(r) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
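
Further down, this handler forwards the x-amz-copy-source-range header, which uses standard HTTP Range syntax, when reading the source object. A rough standalone sketch of fetching such a byte range with plain net/http (the handler itself goes through util.ReadUrlAsReaderCloser):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchRange issues a GET with an optional Range header; status and
// error handling is elided for brevity.
func fetchRange(url, rangeHeader string) (io.ReadCloser, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	if rangeHeader != "" {
		req.Header.Set("Range", rangeHeader) // e.g. "bytes=0-1048575"
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}

func main() {
	body, err := fetchRange("http://localhost:8888/buckets/b/large.bin", "bytes=0-1023")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer body.Close()
	n, _ := io.Copy(io.Discard, body)
	fmt.Println("read", n, "bytes")
}
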
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidPart, r.URL) + return + } + + // check partID with maximum part ID for multipart objects + if partID > globalMaxPartID { + writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + defer dataReader.Close() + + glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now().UTC(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func isReplace(r *http.Request) bool { + return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 44e93d297..f1a539ac5 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -3,15 +3,24 @@ package s3api import ( "crypto/md5" "encoding/json" + "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" "io" "io/ioutil" "net/http" + "net/url" + "sort" "strings" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -20,6 +29,7 @@ var ( func init() { client = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, }} } @@ -28,50 +38,73 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) _, err := validateContentMd5(r.Header) if err != nil { - writeErrorResponse(w, ErrInvalidDigest, r.URL) + writeErrorResponse(w, s3err.ErrInvalidDigest, r.URL) return } - rAuthType := getRequestAuthType(r) dataReader := r.Body - if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + if s3a.iam.isEnabled() { + rAuthType := getRequestAuthType(r) + var s3ErrCode s3err.ErrorCode + switch rAuthType { + case authTypeStreamingSigned: + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + case authTypeSignedV2, authTypePresignedV2: + _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) + case authTypePresigned, authTypeSigned: + _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) + } + if s3ErrCode != s3err.ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return + } } + defer dataReader.Close() - uploadUrl := 
fmt.Sprintf("http://%s%s/%s%s?collection=%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket) + if strings.HasSuffix(object, "/") { + if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + } else { + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) - etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) + etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) - if errCode != ErrNone { - writeErrorResponse(w, errCode, r.URL) - return - } + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } - setEtag(w, etag) + setEtag(w, etag) + } writeSuccessResponseEmpty(w) } +func urlPathEscape(object string) string { + var escapedParts []string + for _, part := range strings.Split(object, "/") { + escapedParts = append(escapedParts, url.PathEscape(part)) + } + return strings.Join(escapedParts, "/") +} + func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) if strings.HasSuffix(r.URL.Path, "/") { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, passThroughResponse) @@ -79,12 +112,10 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, passThroughResponse) @@ -92,29 +123,152 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) - destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true", + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) - s3a.proxyToFiler(w, r, destUrl, func(proxyResonse *http.Response, w http.ResponseWriter) { - for k, v := range proxyResonse.Header { + s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) { + for k, v := range proxyResponse.Header { w.Header()[k] = v } w.WriteHeader(http.StatusNoContent) }) +} + +// / ObjectIdentifier carries key name for the object to delete. +type ObjectIdentifier struct { + ObjectName string `xml:"Key"` +} + +// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. +type DeleteObjectsRequest struct { + // Element to enable quiet mode for the request + Quiet bool + // List of objects to be deleted + Objects []ObjectIdentifier `xml:"Object"` +} + +// DeleteError structure. 
+type DeleteError struct { + Code string + Message string + Key string +} + +// DeleteObjectsResponse container for multiple object deletes. +type DeleteObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` + // Collection of all deleted objects + DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + + // Collection of errors deleting certain objects. + Errors []DeleteError `xml:"Error,omitempty"` } // DeleteMultipleObjectsHandler - Delete multiple objects func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - // TODO - writeErrorResponse(w, ErrNotImplemented, r.URL) + + bucket, _ := getBucketAndObject(r) + + deleteXMLBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + + deleteObjects := &DeleteObjectsRequest{} + if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { + writeErrorResponse(w, s3err.ErrMalformedXML, r.URL) + return + } + + var deletedObjects []ObjectIdentifier + var deleteErrors []DeleteError + + directoriesWithDeletion := make(map[string]int) + + s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + // delete file entries + for _, object := range deleteObjects.Objects { + + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err == nil { + directoriesWithDeletion[parentDirectoryPath]++ + deletedObjects = append(deletedObjects, object) + } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { + deletedObjects = append(deletedObjects, object) + } else { + delete(directoriesWithDeletion, parentDirectoryPath) + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err.Error(), + Key: object.ObjectName, + }) + } + } + + // purge empty folders, only checking folders with deletions + for len(directoriesWithDeletion) > 0 { + directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion) + } + + return nil + }) + + deleteResp := DeleteObjectsResponse{} + if !deleteObjects.Quiet { + deleteResp.DeletedObjects = deletedObjects + } + deleteResp.Errors = deleteErrors + + writeSuccessResponseXML(w, encodeResponse(deleteResp)) + } -func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) { +func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) { + var allDirs []string + for dir, _ := range directoriesWithDeletion { + allDirs = append(allDirs, dir) + } + sort.Slice(allDirs, func(i, j int) bool { + return len(allDirs[i]) > len(allDirs[j]) + }) + newDirectoriesWithDeletion = make(map[string]int) + for _, dir := range allDirs { + parentDir, dirName := util.FullPath(dir).DirAndName() + if parentDir == s3a.option.BucketsPath { + continue + } + if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil { + 
glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err) + } else { + newDirectoriesWithDeletion[parentDir]++ + } + } + return +} + +var passThroughHeaders = []string{ + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", +} + +func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) { glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl) @@ -122,15 +276,27 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des if err != nil { glog.Errorf("NewRequest %s: %v", destUrl, err) - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } proxyReq.Header.Set("Host", s3a.option.Filer) proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - proxyReq.Header.Set("Etag-MD5", "True") for header, values := range r.Header { + // handle s3 related headers + passed := false + for _, h := range passThroughHeaders { + if strings.ToLower(header) == h && len(values) > 0 { + proxyReq.Header.Add(header[len("response-"):], values[0]) + passed = true + break + } + } + if passed { + continue + } + // handle other headers for _, value := range values { proxyReq.Header.Add(header, value) } @@ -140,31 +306,44 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des if postErr != nil { glog.Errorf("post to filer: %v", postErr) - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } - defer resp.Body.Close() + defer util.CloseResponse(resp) + + if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 { + if r.Method != "DELETE" { + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + return + } + } responseFn(resp, w) + } -func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { - for k, v := range proxyResonse.Header { + +func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) { + for k, v := range proxyResponse.Header { w.Header()[k] = v } - w.WriteHeader(proxyResonse.StatusCode) - io.Copy(w, proxyResonse.Body) + if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 { + w.WriteHeader(http.StatusPartialContent) + } else { + w.WriteHeader(proxyResponse.StatusCode) + } + io.Copy(w, proxyResponse.Body) } -func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) { +func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) { hash := md5.New() - var body io.Reader = io.TeeReader(dataReader, hash) + var body = io.TeeReader(dataReader, hash) proxyReq, err := http.NewRequest("PUT", uploadUrl, body) if err != nil { glog.Errorf("NewRequest %s: %v", uploadUrl, err) - return "", ErrInternalError + return "", s3err.ErrInternalError } proxyReq.Header.Set("Host", s3a.option.Filer) @@ -178,11 +357,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp, postErr := client.Do(proxyReq) - dataReader.Close() - if postErr != nil { glog.Errorf("post to filer: %v", postErr) - return "", ErrInternalError + return "", s3err.ErrInternalError } defer resp.Body.Close() @@ -190,21 +367,21 @@ func (s3a *S3ApiServer) putToFiler(r 
*http.Request, uploadUrl string, dataReader resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { - glog.Errorf("upload to filer response read: %v", ra_err) - return etag, ErrInternalError + glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) + return etag, s3err.ErrInternalError } var ret weed_server.FilerPostResult unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body)) - return "", ErrInternalError + return "", s3err.ErrInternalError } if ret.Error != "" { glog.Errorf("upload to filer error: %v", ret.Error) - return "", ErrInternalError + return "", filerErrorToS3Error(ret.Error) } - return etag, ErrNone + return etag, s3err.ErrNone } func setEtag(w http.ResponseWriter, etag string) { @@ -217,10 +394,20 @@ func setEtag(w http.ResponseWriter, etag string) { } } -func getObject(vars map[string]string) string { - object := vars["object"] +func getBucketAndObject(r *http.Request) (bucket, object string) { + vars := mux.Vars(r) + bucket = vars["bucket"] + object = vars["object"] if !strings.HasPrefix(object, "/") { object = "/" + object } - return object + + return +} + +func filerErrorToS3Error(errString string) s3err.ErrorCode { + if strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory") { + return s3err.ErrExistingObjectIsDirectory + } + return s3err.ErrInternalError } diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go new file mode 100644 index 000000000..035302ae6 --- /dev/null +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -0,0 +1,241 @@ +package s3api + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/policy" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/dustin/go-humanize" + "github.com/gorilla/mux" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html + + bucket := mux.Vars(r)["bucket"] + + reader, err := r.MultipartReader() + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + form, err := reader.ReadForm(int64(5 * humanize.MiByte)) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + defer form.RemoveAll() + + fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + if fileBody == nil { + writeErrorResponse(w, s3err.ErrPOSTFileRequired, r.URL) + return + } + defer fileBody.Close() + + formValues.Set("Bucket", bucket) + + if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") { + formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1)) + } + object := formValues.Get("Key") + + successRedirect := formValues.Get("success_action_redirect") + successStatus := formValues.Get("success_action_status") + var redirectURL *url.URL + if successRedirect != "" { + redirectURL, err = url.Parse(successRedirect) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + } + + // Verify policy signature. 
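
doesPolicySignatureMatch, defined at the bottom of this file, dispatches on whether the form carries a bare Signature field (SignV2) or the X-Amz-* fields (SignV4); schematically:

package main

import (
	"fmt"
	"net/http"
)

// Mirrors the dispatch in doesPolicySignatureMatch: V2-signed POST forms
// carry a bare "Signature" field, everything else is treated as V4.
func policySignatureVersion(formValues http.Header) string {
	if _, ok := formValues["Signature"]; ok {
		return "v2"
	}
	return "v4"
}

func main() {
	v2Form := make(http.Header)
	v2Form.Set("Signature", "dGVzdHNpZ25hdHVyZQ==")
	v4Form := make(http.Header)
	v4Form.Set("X-Amz-Signature", "deadbeef")
	fmt.Println(policySignatureVersion(v2Form), policySignatureVersion(v4Form)) // v2 v4
}
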
+ errCode := s3a.iam.doesPolicySignatureMatch(formValues) + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy")) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + + // Handle policy if it is set. + if len(policyBytes) > 0 { + + postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes)) + if err != nil { + writeErrorResponse(w, s3err.ErrPostPolicyConditionInvalidFormat, r.URL) + return + } + + // Make sure formValues adhere to policy restrictions. + if err = policy.CheckPostPolicy(formValues, postPolicyForm); err != nil { + w.Header().Set("Location", r.URL.Path) + w.WriteHeader(http.StatusTemporaryRedirect) + return + } + + // Ensure that the object size is within expected range, also the file size + // should not exceed the maximum single Put size (5 GiB) + lengthRange := postPolicyForm.Conditions.ContentLengthRange + if lengthRange.Valid { + if fileSize < lengthRange.Min { + writeErrorResponse(w, s3err.ErrEntityTooSmall, r.URL) + return + } + + if fileSize > lengthRange.Max { + writeErrorResponse(w, s3err.ErrEntityTooLarge, r.URL) + return + } + } + } + + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) + + etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody) + + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + if successRedirect != "" { + // Replace raw query params.. + redirectURL.RawQuery = getRedirectPostRawQuery(bucket, object, etag) + w.Header().Set("Location", redirectURL.String()) + writeResponse(w, http.StatusSeeOther, nil, mimeNone) + return + } + + setEtag(w, etag) + + // Decide what http response to send depending on success_action_status parameter + switch successStatus { + case "201": + resp := encodeResponse(PostResponse{ + Bucket: bucket, + Key: object, + ETag: `"` + etag + `"`, + Location: w.Header().Get("Location"), + }) + writeResponse(w, http.StatusCreated, resp, mimeXML) + case "200": + writeResponse(w, http.StatusOK, nil, mimeNone) + default: + writeSuccessResponseEmpty(w) + } + +} + +// Extract form fields and file data from a HTTP POST Policy +func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { + /// HTML Form values + fileName = "" + + // Canonicalize the form values into http.Header. + formValues = make(http.Header) + for k, v := range form.Value { + formValues[http.CanonicalHeaderKey(k)] = v + } + + // Validate form values. + if err = validateFormFieldSize(formValues); err != nil { + return nil, "", 0, nil, err + } + + // this means that filename="" was not specified for file key and Go has + // an ugly way of handling this situation. 
Refer here + // https://golang.org/src/mime/multipart/formdata.go#L61 + if len(form.File) == 0 { + var b = &bytes.Buffer{} + for _, v := range formValues["File"] { + b.WriteString(v) + } + fileSize = int64(b.Len()) + filePart = ioutil.NopCloser(b) + return filePart, fileName, fileSize, formValues, nil + } + + // Iterator until we find a valid File field and break + for k, v := range form.File { + canonicalFormName := http.CanonicalHeaderKey(k) + if canonicalFormName == "File" { + if len(v) == 0 { + return nil, "", 0, nil, errors.New("Invalid arguments specified") + } + // Fetch fileHeader which has the uploaded file information + fileHeader := v[0] + // Set filename + fileName = fileHeader.Filename + // Open the uploaded part + filePart, err = fileHeader.Open() + if err != nil { + return nil, "", 0, nil, err + } + // Compute file size + fileSize, err = filePart.(io.Seeker).Seek(0, 2) + if err != nil { + return nil, "", 0, nil, err + } + // Reset Seek to the beginning + _, err = filePart.(io.Seeker).Seek(0, 0) + if err != nil { + return nil, "", 0, nil, err + } + // File found and ready for reading + break + } + } + return filePart, fileName, fileSize, formValues, nil +} + +// Validate form field size for s3 specification requirement. +func validateFormFieldSize(formValues http.Header) error { + // Iterate over form values + for k := range formValues { + // Check if value's field exceeds S3 limit + if int64(len(formValues.Get(k))) > int64(1*humanize.MiByte) { + return errors.New("Data size larger than expected") + } + } + + // Success. + return nil +} + +func getRedirectPostRawQuery(bucket, key, etag string) string { + redirectValues := make(url.Values) + redirectValues.Set("bucket", bucket) + redirectValues.Set("key", key) + redirectValues.Set("etag", "\""+etag+"\"") + return redirectValues.Encode() +} + +// Check to see if Policy is signed correctly. +func (iam *IdentityAccessManagement) doesPolicySignatureMatch(formValues http.Header) s3err.ErrorCode { + // For SignV2 - Signature field will be valid + if _, ok := formValues["Signature"]; ok { + return iam.doesPolicySignatureV2Match(formValues) + } + return iam.doesPolicySignatureV4Match(formValues) +} diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 72a25e4a5..4ddb24e31 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -1,65 +1,61 @@ package s3api import ( - "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" "net/url" "strconv" "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" ) const ( - maxObjectList = 1000 // Limit number of objects in a listObjectsResponse. - maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse. - maxPartsList = 1000 // Limit number of parts in a listPartsResponse. - globalMaxPartID = 10000 + maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse. + maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + maxPartsList = 10000 // Limit number of parts in a listPartsResponse. + globalMaxPartID = 100000 ) // NewMultipartUploadHandler - New multipart upload. 
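
The handlers that follow store each uploaded part under the uploads folder as a zero-padded .part file keyed by the part number; this commit also switches PutObjectPartHandler from the 0-based partID-1 to the client's 1-based partID. A toy illustration of the naming, with a hypothetical uploads-folder path:

package main

import "fmt"

func main() {
	// Each part lands at <uploadsFolder>/<uploadID>/<%04d>.part.
	uploadsFolder := "/buckets/.uploads/mybucket" // hypothetical layout
	uploadID := "e4a828f8d9c6"                    // hypothetical upload id
	for _, partID := range []int{1, 2, 10} {
		fmt.Printf("%s/%s/%04d.part\n", uploadsFolder, uploadID, partID)
	}
	// /buckets/.uploads/mybucket/e4a828f8d9c6/0001.part ...
}
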
func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - var object, bucket string - vars := mux.Vars(r) - bucket = vars["bucket"] - object = vars["object"] + bucket, object := getBucketAndObject(r) - response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{ + response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), }) - if errCode != ErrNone { + glog.V(2).Info("NewMultipartUploadHandler", string(encodeResponse(response)), errCode) + + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } - // println("NewMultipartUploadHandler", string(encodeResponse(response))) - writeSuccessResponseXML(w, encodeResponse(response)) } // CompleteMultipartUploadHandler - Completes multipart upload. func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{ + response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) - // println("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode) + glog.V(2).Info("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode) - if errCode != ErrNone { + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } @@ -70,25 +66,23 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r // AbortMultipartUploadHandler - Aborts multipart upload. func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ + response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) - if errCode != ErrNone { + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } - // println("AbortMultipartUploadHandler", string(encodeResponse(response))) + glog.V(2).Info("AbortMultipartUploadHandler", string(encodeResponse(response))) writeSuccessResponseXML(w, encodeResponse(response)) @@ -96,23 +90,22 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht // ListMultipartUploadsHandler - Lists multipart uploads. func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] + bucket, _ := getBucketAndObject(r) prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query()) if maxUploads < 0 { - writeErrorResponse(w, ErrInvalidMaxUploads, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxUploads, r.URL) return } if keyMarker != "" { // Marker not common with prefix is not implemented. 
if !strings.HasPrefix(keyMarker, prefix) { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } } - response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{ + response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ Bucket: aws.String(bucket), Delimiter: aws.String(delimiter), EncodingType: aws.String(encodingType), @@ -122,34 +115,33 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht UploadIdMarker: aws.String(uploadIDMarker), }) - if errCode != ErrNone { + glog.V(2).Info("ListMultipartUploadsHandler", string(encodeResponse(response)), errCode) + + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } // TODO handle encodingType - // println("ListMultipartUploadsHandler", string(encodeResponse(response))) writeSuccessResponseXML(w, encodeResponse(response)) } // ListObjectPartsHandler - Lists object parts in a multipart upload. func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query()) if partNumberMarker < 0 { - writeErrorResponse(w, ErrInvalidPartNumberMarker, r.URL) + writeErrorResponse(w, s3err.ErrInvalidPartNumberMarker, r.URL) return } if maxParts < 0 { - writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL) return } - response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{ + response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), MaxParts: aws.Int64(int64(maxParts)), @@ -157,55 +149,64 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re UploadId: aws.String(uploadID), }) - if errCode != ErrNone { + glog.V(2).Info("ListObjectPartsHandler", string(encodeResponse(response)), errCode) + + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } - // println("ListObjectPartsHandler", string(encodeResponse(response))) - writeSuccessResponseXML(w, encodeResponse(response)) } // PutObjectPartHandler - Put an object part in a multipart upload. 
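
PutObjectPartHandler below repeats the auth-type switch used in PutObjectHandler: only streaming-signed uploads need the body wrapped in the chunk-validating reader, while V2/V4 signed and presigned requests are verified up front. A rough standalone classification of the streaming case (the real getRequestAuthType also inspects presigned query parameters):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// A streaming-signed upload declares itself via the content sha header
// and a V4 Authorization header.
func isStreamingSigned(r *http.Request) bool {
	return r.Header.Get("X-Amz-Content-Sha256") == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" &&
		strings.HasPrefix(r.Header.Get("Authorization"), "AWS4-HMAC-SHA256")
}

func main() {
	r, _ := http.NewRequest("PUT", "http://localhost:8333/bucket/object?partNumber=1&uploadId=x", nil)
	r.Header.Set("X-Amz-Content-Sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	r.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=...")
	fmt.Println(isStreamingSigned(r)) // true
}
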
func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - rAuthType := getRequestAuthType(r) - - ctx := context.Background() + bucket, _ := getBucketAndObject(r) uploadID := r.URL.Query().Get("uploadId") - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true) + exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true) if !exists { - writeErrorResponse(w, ErrNoSuchUpload, r.URL) + writeErrorResponse(w, s3err.ErrNoSuchUpload, r.URL) return } partIDString := r.URL.Query().Get("partNumber") partID, err := strconv.Atoi(partIDString) if err != nil { - writeErrorResponse(w, ErrInvalidPart, r.URL) + writeErrorResponse(w, s3err.ErrInvalidPart, r.URL) return } if partID > globalMaxPartID { - writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL) return } dataReader := r.Body - if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + if s3a.iam.isEnabled() { + rAuthType := getRequestAuthType(r) + var s3ErrCode s3err.ErrorCode + switch rAuthType { + case authTypeStreamingSigned: + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + case authTypeSignedV2, authTypePresignedV2: + _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) + case authTypePresigned, authTypeSigned: + _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) + } + if s3ErrCode != s3err.ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return + } } + defer dataReader.Close() uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", - s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) + s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID, bucket) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) - if errCode != ErrNone { + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_tagging_handlers.go new file mode 100644 index 000000000..94719834c --- /dev/null +++ b/weed/s3api/s3api_object_tagging_handlers.go @@ -0,0 +1,117 @@ +package s3api + +import ( + "encoding/xml" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "io/ioutil" + "net/http" +) + +// GetObjectTaggingHandler - GET object tagging +// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html +func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := getBucketAndObject(r) + + target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) + dir, name := target.DirAndName() + + tags, err := s3a.getTags(dir, name) + if err != nil { + if err == filer_pb.ErrNotFound { + glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + } else { + glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + } + return + } + + writeSuccessResponseXML(w, encodeResponse(FromTags(tags))) + +} + +// PutObjectTaggingHandler Put object tagging +// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := getBucketAndObject(r) + + 
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) + dir, name := target.DirAndName() + + tagging := &Tagging{} + input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) + if err != nil { + glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + if err = xml.Unmarshal(input, tagging); err != nil { + glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrMalformedXML, r.URL) + return + } + tags := tagging.ToTags() + if len(tags) > 10 { + glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags)) + writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) + return + } + for k, v := range tags { + if len(k) > 128 { + glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k) + writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) + return + } + if len(v) > 256 { + glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v) + writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) + return + } + } + + if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil { + if err == filer_pb.ErrNotFound { + glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + } else { + glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + } + return + } + + w.WriteHeader(http.StatusNoContent) + +} + +// DeleteObjectTaggingHandler Delete object tagging +// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := getBucketAndObject(r) + + target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) + dir, name := target.DirAndName() + + err := s3a.rmTags(dir, name) + if err != nil { + if err == filer_pb.ErrNotFound { + glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + } else { + glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + } + return + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index aa6849cbd..739cdd8f9 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -2,7 +2,9 @@ package s3api import ( "context" + "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" "io" "net/http" "net/url" @@ -11,51 +13,72 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) -const ( - maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse. 
-) +type ListBucketResultV2 struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + MaxKeys int `xml:"MaxKeys"` + Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated"` + Contents []ListEntry `xml:"Contents,omitempty"` + CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` + ContinuationToken string `xml:"ContinuationToken,omitempty"` + NextContinuationToken string `xml:"NextContinuationToken,omitempty"` + KeyCount int `xml:"KeyCount"` + StartAfter string `xml:"StartAfter,omitempty"` +} func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html // collect parameters - vars := mux.Vars(r) - bucket := vars["bucket"] - - glog.V(4).Infof("read v2: %v", vars) + bucket, _ := getBucketAndObject(r) - originalPrefix, marker, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query()) + originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query()) if maxKeys < 0 { - writeErrorResponse(w, ErrInvalidMaxKeys, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL) return } if delimiter != "" && delimiter != "/" { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } - if marker == "" { + marker := continuationToken + if continuationToken == "" { marker = startAfter } - ctx := context.Background() - - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } + responseV2 := &ListBucketResultV2{ + XMLName: response.XMLName, + Name: response.Name, + CommonPrefixes: response.CommonPrefixes, + Contents: response.Contents, + ContinuationToken: continuationToken, + Delimiter: response.Delimiter, + IsTruncated: response.IsTruncated, + KeyCount: len(response.Contents) + len(response.CommonPrefixes), + MaxKeys: response.MaxKeys, + NextContinuationToken: response.NextMarker, + Prefix: response.Prefix, + StartAfter: startAfter, + } - writeSuccessResponseXML(w, encodeResponse(response)) + writeSuccessResponseXML(w, encodeResponse(responseV2)) } func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { @@ -63,121 +86,203 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html // collect parameters - vars := mux.Vars(r) - bucket := vars["bucket"] - - ctx := context.Background() + bucket, _ := getBucketAndObject(r) originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) if maxKeys < 0 { - writeErrorResponse(w, ErrInvalidMaxKeys, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL) return } if delimiter != "" && delimiter != "/" { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } 
writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { - +func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name - dir, prefix := filepath.Split(originalPrefix) - if strings.HasPrefix(dir, "/") { - dir = dir[1:] + reqDir, prefix := filepath.Split(originalPrefix) + if strings.HasPrefix(reqDir, "/") { + reqDir = reqDir[1:] + } + bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket) + reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir) + if strings.HasSuffix(reqDir, "/") { + // remove trailing "/" + reqDir = reqDir[:len(reqDir)-1] } - // check filer - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.ListEntriesRequest{ - Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), - Prefix: prefix, - Limit: uint32(maxKeys + 1), - StartFromFileName: marker, - InclusiveStartFrom: false, - } - - stream, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("list buckets: %v", err) - } + var contents []ListEntry + var commonPrefixes []PrefixEntry + var isTruncated bool + var doErr error + var nextMarker string - var contents []ListEntry - var commonPrefixes []PrefixEntry - var counter int - var lastEntryName string - var isTruncated bool - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - return recvErr - } - } + // check filer + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - entry := resp.Entry - counter++ - if counter > maxKeys { - isTruncated = true - break - } - lastEntryName = entry.Name + _, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) { if entry.IsDirectory { - if entry.Name != ".uploads" { + if delimiter == "/" { commonPrefixes = append(commonPrefixes, PrefixEntry{ - Prefix: fmt.Sprintf("%s%s/", dir, entry.Name), + Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):], }) } } else { + storageClass := "STANDARD" + if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok { + storageClass = string(v) + } contents = append(contents, ListEntry{ - Key: fmt.Sprintf("%s%s", dir, entry.Name), - LastModified: time.Unix(entry.Attributes.Mtime, 0), - ETag: "\"" + filer2.ETag(entry.Chunks) + "\"", - Size: int64(filer2.TotalSize(entry.Chunks)), + Key: fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):], + LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(), + ETag: "\"" + filer.ETag(entry) + "\"", + Size: int64(filer.FileSize(entry)), Owner: CanonicalUser{ ID: fmt.Sprintf("%x", entry.Attributes.Uid), DisplayName: entry.Attributes.UserName, }, - StorageClass: "STANDARD", + StorageClass: StorageClass(storageClass), }) } + }) + if doErr != nil { + return doErr + } + if !isTruncated { + nextMarker = "" } response = ListBucketResult{ Name: bucket, Prefix: originalPrefix, Marker: marker, - NextMarker: lastEntryName, + NextMarker: nextMarker, MaxKeys: maxKeys, - Delimiter: "/", + Delimiter: delimiter, IsTruncated: isTruncated, Contents: contents, CommonPrefixes: commonPrefixes, } - glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, 
response) - return nil }) return } +func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) { + // invariants + // prefix and marker should be under dir, marker may contain "/" + // maxKeys should be updated for each recursion + + if prefix == "/" && delimiter == "/" { + return + } + if maxKeys <= 0 { + return + } + + if strings.Contains(marker, "/") { + sepIndex := strings.Index(marker, "/") + subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:] + // println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys) + subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn) + if subErr != nil { + err = subErr + return + } + isTruncated = isTruncated || subIsTruncated + maxKeys -= subCounter + nextMarker = subDir + "/" + subNextMarker + // finished processing this sub directory + marker = subDir + } + + // now marker is also a direct child of dir + request := &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: prefix, + Limit: uint32(maxKeys + 1), + StartFromFileName: marker, + InclusiveStartFrom: false, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, listErr := client.ListEntries(ctx, request) + if listErr != nil { + err = fmt.Errorf("list entries %+v: %v", request, listErr) + return + } + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + err = fmt.Errorf("iterating entries %+v: %v", request, recvErr) + return + } + } + if counter >= maxKeys { + isTruncated = true + return + } + entry := resp.Entry + nextMarker = entry.Name + if entry.IsDirectory { + // println("ListEntries", dir, "dir:", entry.Name) + if entry.Name != ".uploads" { // FIXME no need to apply to all directories.
this extra also affects maxKeys + if delimiter != "/" { + eachEntryFn(dir, entry) + // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter) + subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn) + if subErr != nil { + err = fmt.Errorf("doListFilerEntries2: %v", subErr) + return + } + // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated) + counter += subCounter + nextMarker = entry.Name + "/" + subNextMarker + if subIsTruncated { + isTruncated = true + return + } + } else { + var isEmpty bool + if !s3a.option.AllowEmptyFolder { + if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil { + glog.Errorf("check empty folder %s: %v", dir, err) + } + } + if !isEmpty { + eachEntryFn(dir, entry) + counter++ + } + } + } + } else { + // println("ListEntries", dir, "file:", entry.Name) + eachEntryFn(dir, entry) + counter++ + } + } + return +} + func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) { prefix = values.Get("prefix") token = values.Get("continuation-token") @@ -203,3 +308,57 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, } return } + +func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) { + // println("+ isDirectoryAllEmpty", dir, name) + glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name) + defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty) + var fileCounter int + var subDirs []string + currentDir := parentDir + "/" + name + var startFrom string + var isExhausted bool + var foundEntry bool + for fileCounter == 0 && !isExhausted && err == nil { + err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error { + foundEntry = true + if entry.IsDirectory { + subDirs = append(subDirs, entry.Name) + } else { + fileCounter++ + } + startFrom = entry.Name + isExhausted = isExhausted || isLast + glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast) + return nil + }, startFrom, false, 8) + if !foundEntry { + break + } + } + + if err != nil { + return false, err + } + + if fileCounter > 0 { + return false, nil + } + + for _, subDir := range subDirs { + isSubEmpty, subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir) + if subErr != nil { + return false, subErr + } + if !isSubEmpty { + return false, nil + } + } + + glog.V(1).Infof("deleting empty folder %s", currentDir) + if err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil { + return + } + + return true, nil +} diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index edf634444..54df29492 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -1,30 +1,43 @@ package s3api import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + . 
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "net/http" + "strings" + "time" + "github.com/gorilla/mux" "google.golang.org/grpc" - "net/http" ) type S3ApiServerOption struct { Filer string + Port int FilerGrpcAddress string + Config string DomainName string BucketsPath string GrpcDialOption grpc.DialOption + AllowEmptyFolder bool } type S3ApiServer struct { option *S3ApiServerOption + iam *IdentityAccessManagement } func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { s3ApiServer = &S3ApiServer{ option: option, + iam: NewIdentityAccessManagement(option), } s3ApiServer.registerRouter(router) + go s3ApiServer.subscribeMetaEvents("s3", filer.IamConfigDirecotry+"/"+filer.IamIdentityFile, time.Now().UnixNano()) + return s3ApiServer, nil } @@ -33,55 +46,70 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { apiRouter := router.PathPrefix("/").Subrouter() var routers []*mux.Router if s3a.option.DomainName != "" { - routers = append(routers, apiRouter.Host("{bucket:.+}."+s3a.option.DomainName).Subrouter()) + domainNames := strings.Split(s3a.option.DomainName, ",") + for _, domainName := range domainNames { + routers = append(routers, apiRouter.Host( + fmt.Sprintf("%s.%s:%d", "{bucket:.+}", domainName, s3a.option.Port)).Subrouter()) + routers = append(routers, apiRouter.Host( + fmt.Sprintf("%s.%s", "{bucket:.+}", domainName)).Subrouter()) + } } routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter()) for _, bucket := range routers { // HeadObject - bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET")) // HeadBucket - bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET")) + // CopyObjectPart + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - 
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "") + // GetObjectTagging + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "") + // PutObjectTagging + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "") + // DeleteObjectTagging + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "") + + // CopyObject + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY")) // PutObject - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT")) // PutBucket - bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler) + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT")) // DeleteObject - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE")) // DeleteBucket - bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler) + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE")) // ListObjectsV2 - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2") // GetObject, but directory listing is not supported - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET")) // ListObjectsV1 (Legacy) - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler) + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST")) + + // PostPolicy + bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST")) // DeleteMultipleObjects - bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "") /* - // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) - - // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", 
"{uploadId:.*}") // not implemented // GetBucketLocation @@ -96,14 +124,12 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "") // DeleteBucketPolicy bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "") - // PostPolicy - bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(s3a.PostPolicyBucketHandler) */ } // ListBuckets - apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler) + apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST")) // NotFound apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go new file mode 100644 index 000000000..026766beb --- /dev/null +++ b/weed/s3api/s3api_test.go @@ -0,0 +1,32 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestCopyObjectResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + + response := CopyObjectResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} + +func TestCopyPartResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + + response := CopyPartResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} diff --git a/weed/s3api/s3err/s3-error.go b/weed/s3api/s3err/s3-error.go new file mode 100644 index 000000000..224378ec5 --- /dev/null +++ b/weed/s3api/s3err/s3-error.go @@ -0,0 +1,61 @@ +package s3err + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+} diff --git a/weed/s3api/s3err/s3api_errors.go b/weed/s3api/s3err/s3api_errors.go new file mode 100644 index 000000000..a3f7bb25e --- /dev/null +++ b/weed/s3api/s3err/s3api_errors.go @@ -0,0 +1,359 @@ +package s3err + +import ( + "encoding/xml" + "fmt" + "net/http" +) + +// APIError structure +type APIError struct { + Code string + Description string + HTTPStatusCode int +} + +// RESTErrorResponse - error response format +type RESTErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string `xml:"Code" json:"Code"` + Message string `xml:"Message" json:"Message"` + Resource string `xml:"Resource" json:"Resource"` + RequestID string `xml:"RequestId" json:"RequestId"` + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// Error - Returns S3 error string. +func (e RESTErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// ErrorCode type of error status. +type ErrorCode int + +// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +const ( + ErrNone ErrorCode = iota + ErrAccessDenied + ErrMethodNotAllowed + ErrBucketNotEmpty + ErrBucketAlreadyExists + ErrBucketAlreadyOwnedByYou + ErrNoSuchBucket + ErrNoSuchKey + ErrNoSuchUpload + ErrInvalidBucketName + ErrInvalidDigest + ErrInvalidMaxKeys + ErrInvalidMaxUploads + ErrInvalidMaxParts + ErrInvalidPartNumberMarker + ErrInvalidPart + ErrInternalError + ErrInvalidCopyDest + ErrInvalidCopySource + ErrInvalidTag + ErrAuthHeaderEmpty + ErrSignatureVersionNotSupported + ErrMalformedPOSTRequest + ErrPOSTFileRequired + ErrPostPolicyConditionInvalidFormat + ErrEntityTooSmall + ErrEntityTooLarge + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrMalformedXML + ErrMalformedDate + ErrMalformedPresignedDate + ErrMalformedCredentialDate + ErrMissingSignHeadersTag + ErrMissingSignTag + ErrUnsignedHeaders + ErrInvalidQueryParams + ErrInvalidQuerySignatureAlgo + ErrExpiredPresignRequest + ErrMalformedExpires + ErrNegativeExpires + ErrMaximumExpires + ErrSignatureDoesNotMatch + ErrContentSHA256Mismatch + ErrInvalidAccessKeyID + ErrRequestNotReadyYet + ErrMissingDateHeader + ErrInvalidRequest + ErrNotImplemented + + ErrExistingObjectIsDirectory +) + +// error code to APIError structure, these fields carry respective +// descriptions for all the error responses. +var errorCodeResponse = map[ErrorCode]APIError{ + ErrAccessDenied: { + Code: "AccessDenied", + Description: "Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMethodNotAllowed: { + Code: "MethodNotAllowed", + Description: "The specified method is not allowed against this resource.", + HTTPStatusCode: http.StatusMethodNotAllowed, + }, + ErrBucketNotEmpty: { + Code: "BucketNotEmpty", + Description: "The bucket you tried to delete is not empty", + HTTPStatusCode: http.StatusConflict, + }, + ErrBucketAlreadyExists: { + Code: "BucketAlreadyExists", + Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + HTTPStatusCode: http.StatusConflict, + }, + ErrBucketAlreadyOwnedByYou: { + Code: "BucketAlreadyOwnedByYou", + Description: "Your previous request to create the named bucket succeeded and you already own it.", + HTTPStatusCode: http.StatusConflict, + }, + ErrInvalidBucketName: { + Code: "InvalidBucketName", + Description: "The specified bucket is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDigest: { + Code: "InvalidDigest", + Description: "The Content-Md5 you specified is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxUploads: { + Code: "InvalidArgument", + Description: "Argument max-uploads must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxKeys: { + Code: "InvalidArgument", + Description: "Argument maxKeys must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxParts: { + Code: "InvalidArgument", + Description: "Argument max-parts must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPartNumberMarker: { + Code: "InvalidArgument", + Description: "Argument partNumberMarker must be an integer.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoSuchBucket: { + Code: "NoSuchBucket", + Description: "The specified bucket does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchKey: { + Code: "NoSuchKey", + Description: "The specified key does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchUpload: { + Code: "NoSuchUpload", + Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrInternalError: { + Code: "InternalError", + Description: "We encountered an internal error, please try again.", + HTTPStatusCode: http.StatusInternalServerError, + }, + + ErrInvalidPart: { + Code: "InvalidPart", + Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTag: { + Code: "InvalidArgument", + Description: "The Tag value you have provided is invalid", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedXML: { + Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: "InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. 
Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPOSTRequest: { + Code: "MalformedPOSTRequest", + Description: "The body of your POST request is not well-formed multipart/form-data.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPOSTFileRequired: { + Code: "InvalidArgument", + Description: "POST requires exactly one file upload per request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPostPolicyConditionInvalidFormat: { + Code: "PostPolicyInvalidKeyName", + Description: "Invalid according to Policy: Policy Condition failed", + HTTPStatusCode: http.StatusForbidden, + }, + ErrEntityTooSmall: { + Code: "EntityTooSmall", + Description: "Your proposed upload is smaller than the minimum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEntityTooLarge: { + Code: "EntityTooLarge", + Description: "Your proposed upload exceeds the maximum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingFields: { + Code: "MissingFields", + Description: "Missing fields in request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCredMalformed: { + Code: "AuthorizationQueryParametersError", + Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedDate: { + Code: "MalformedDate", + Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPresignedDate: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuerySignatureAlgo: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires should be a number", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNegativeExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be non-negative", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMaximumExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be less than a week 
(in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidAccessKeyID: { + Code: "InvalidAccessKeyId", + Description: "The access key ID you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrContentSHA256Mismatch: { + Code: "XAmzContentSHA256Mismatch", + Description: "The provided 'x-amz-content-sha256' header does not match what was computed.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNotImplemented: { + Code: "NotImplemented", + Description: "A header you provided implies functionality that is not implemented", + HTTPStatusCode: http.StatusNotImplemented, + }, + ErrExistingObjectIsDirectory: { + Code: "ExistingObjectIsDirectory", + Description: "Existing Object is a directory.", + HTTPStatusCode: http.StatusConflict, + }, +} + +// GetAPIError provides API Error for input API error code. +func GetAPIError(code ErrorCode) APIError { + return errorCodeResponse[code] +} diff --git a/weed/s3api/stats.go b/weed/s3api/stats.go new file mode 100644 index 000000000..b667b32a0 --- /dev/null +++ b/weed/s3api/stats.go @@ -0,0 +1,38 @@ +package s3api + +import ( + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "net/http" + "strconv" + "time" +) + +type StatusRecorder struct { + http.ResponseWriter + Status int +} + +func NewStatusResponseWriter(w http.ResponseWriter) *StatusRecorder { + return &StatusRecorder{w, http.StatusOK} +} + +func (r *StatusRecorder) WriteHeader(status int) { + r.Status = status + r.ResponseWriter.WriteHeader(status) +} + +func (r *StatusRecorder) Flush() { + r.ResponseWriter.(http.Flusher).Flush() +} + +func track(f http.HandlerFunc, action string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION) + recorder := NewStatusResponseWriter(w) + start := time.Now() + f(recorder, r) + stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds()) + stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status)).Inc() + } +} diff --git a/weed/s3api/tags.go b/weed/s3api/tags.go new file mode 100644 index 000000000..9ff7d1fba --- /dev/null +++ b/weed/s3api/tags.go @@ -0,0 +1,38 @@ +package s3api + +import ( + "encoding/xml" +) + +type Tag struct { + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +type TagSet struct { + Tag []Tag `xml:"Tag"` +} + +type Tagging struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"` + TagSet TagSet `xml:"TagSet"` +} + +func (t *Tagging) ToTags() map[string]string { + output := make(map[string]string) + for _, tag := range t.TagSet.Tag { + output[tag.Key] = tag.Value + } + return output +} + +func FromTags(tags 
map[string]string) (t *Tagging) { + t = &Tagging{} + for k, v := range tags { + t.TagSet.Tag = append(t.TagSet.Tag, Tag{ + Key: k, + Value: v, + }) + } + return +} diff --git a/weed/s3api/tags_test.go b/weed/s3api/tags_test.go new file mode 100644 index 000000000..887843d6f --- /dev/null +++ b/weed/s3api/tags_test.go @@ -0,0 +1,50 @@ +package s3api + +import ( + "encoding/xml" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestXMLUnmarshall(t *testing.T) { + + input := `<?xml version="1.0" encoding="UTF-8"?> +<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <TagSet> + <Tag> + <Key>key1</Key> + <Value>value1</Value> + </Tag> + </TagSet> +</Tagging> +` + + tags := &Tagging{} + + xml.Unmarshal([]byte(input), tags) + + assert.Equal(t, len(tags.TagSet.Tag), 1) + assert.Equal(t, tags.TagSet.Tag[0].Key, "key1") + assert.Equal(t, tags.TagSet.Tag[0].Value, "value1") + +} + +func TestXMLMarshall(t *testing.T) { + tags := &Tagging{ + TagSet: TagSet{ + []Tag{ + { + Key: "key1", + Value: "value1", + }, + }, + }, + } + + actual := string(encodeResponse(tags)) + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet><Tag><Key>key1</Key><Value>value1</Value></Tag></TagSet></Tagging>` + assert.Equal(t, expected, actual) + +} diff --git a/weed/security/guard.go b/weed/security/guard.go index 17fe2ea9e..87ec91ec1 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -62,7 +62,7 @@ func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSi return g } -func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { +func (g *Guard) WhiteList(f http.HandlerFunc) http.HandlerFunc { if !g.isWriteActive { //if no security needed, just skip all checking return f diff --git a/weed/security/tls.go b/weed/security/tls.go index e81ba4831..7d3ffcdca 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -1,31 +1,46 @@ package security import ( + "context" "crypto/tls" "crypto/x509" - "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/util" + grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "io/ioutil" + "strings" - "github.com/chrislusf/seaweedfs/weed/glog" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "github.com/chrislusf/seaweedfs/weed/glog" ) -func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { +type Authenticator struct { + AllowedWildcardDomain string + AllowedCommonNames map[string]bool +} + +func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption, grpc.ServerOption) { if config == nil { - return nil + return nil, nil } // load cert/key, ca cert cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.Errorf("load cert/key error: %v", err) - return nil + glog.V(1).Infof("load cert: %s / key: %s error: %v", + config.GetString(component+".cert"), + config.GetString(component+".key"), + err) + return nil, nil } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString("grpc.ca")) if err != nil { - glog.Errorf("read ca cert file error: %v", err) - return nil + glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err) + return nil, nil } caCertPool := x509.NewCertPool() 
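// The Authenticator introduced below accepts a verified client certificate
// when its subject common name either ends with grpc.allowed_wildcard_domain
// or is listed in <component>.allowed_commonNames. A minimal sketch of that
// rule (hypothetical helper, not part of this change):
//
//	func commonNameAllowed(commonName, wildcardDomain string, allowedCNs map[string]bool) bool {
//		if wildcardDomain != "" && strings.HasSuffix(commonName, wildcardDomain) {
//			return true
//		}
//		return allowedCNs[commonName] // exact match from allowed_commonNames
//	}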
caCertPool.AppendCertsFromPEM(caCert) @@ -35,23 +50,41 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { ClientAuth: tls.RequireAndVerifyClientCert, }) - return grpc.Creds(ta) + allowedCommonNames := config.GetString(component + ".allowed_commonNames") + allowedWildcardDomain := config.GetString("grpc.allowed_wildcard_domain") + if allowedCommonNames != "" || allowedWildcardDomain != "" { + allowedCommonNamesMap := make(map[string]bool) + for _, s := range strings.Split(allowedCommonNames, ",") { + allowedCommonNamesMap[s] = true + } + auther := Authenticator{ + AllowedCommonNames: allowedCommonNamesMap, + AllowedWildcardDomain: allowedWildcardDomain, + } + return grpc.Creds(ta), grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(auther.Authenticate)) + } + return grpc.Creds(ta), nil } -func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { +func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { if config == nil { return grpc.WithInsecure() } + certFileName, keyFileName, caFileName := config.GetString(component+".cert"), config.GetString(component+".key"), config.GetString("grpc.ca") + if certFileName == "" || keyFileName == "" || caFileName == "" { + return grpc.WithInsecure() + } + // load cert/key, cacert - cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + cert, err := tls.LoadX509KeyPair(certFileName, keyFileName) if err != nil { - glog.Errorf("load cert/key error: %v", err) + glog.V(1).Infof("load cert/key error: %v", err) return grpc.WithInsecure() } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(caFileName) if err != nil { - glog.Errorf("read ca cert file error: %v", err) + glog.V(1).Infof("read ca cert file error: %v", err) return grpc.WithInsecure() } caCertPool := x509.NewCertPool() @@ -64,3 +97,28 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { }) return grpc.WithTransportCredentials(ta) } + +func (a Authenticator) Authenticate(ctx context.Context) (newCtx context.Context, err error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Error(codes.Unauthenticated, "no peer found") + } + + tlsAuth, ok := p.AuthInfo.(credentials.TLSInfo) + if !ok { + return ctx, status.Error(codes.Unauthenticated, "unexpected peer transport credentials") + } + if len(tlsAuth.State.VerifiedChains) == 0 || len(tlsAuth.State.VerifiedChains[0]) == 0 { + return ctx, status.Error(codes.Unauthenticated, "could not verify peer certificate") + } + + commonName := tlsAuth.State.VerifiedChains[0][0].Subject.CommonName + if a.AllowedWildcardDomain != "" && strings.HasSuffix(commonName, a.AllowedWildcardDomain) { + return ctx, nil + } + if _, ok := a.AllowedCommonNames[commonName]; ok { + return ctx, nil + } + + return ctx, status.Errorf(codes.Unauthenticated, "invalid subject common name: %s", commonName) +} diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go new file mode 100644 index 000000000..300449fa0 --- /dev/null +++ b/weed/sequence/snowflake_sequencer.go @@ -0,0 +1,46 @@ +package sequence + +import ( + "fmt" + "hash/fnv" + + "github.com/bwmarrin/snowflake" + "github.com/chrislusf/seaweedfs/weed/glog" +) + +// a simple snowflake Sequencer +type SnowflakeSequencer struct { + node *snowflake.Node +} + +func NewSnowflakeSequencer(nodeid string) (*SnowflakeSequencer, error) { + nodeid_hash := hash(nodeid) & 0x3ff + glog.V(0).Infof("use snowflake 
seq id generator, nodeid:%s hex_of_nodeid: %x", nodeid, nodeid_hash) + node, err := snowflake.NewNode(int64(nodeid_hash)) + if err != nil { + fmt.Println(err) + return nil, err + } + + sequencer := &SnowflakeSequencer{node: node} + return sequencer, nil +} + +func hash(s string) uint32 { + h := fnv.New32a() + h.Write([]byte(s)) + return h.Sum32() +} + +func (m *SnowflakeSequencer) NextFileId(count uint64) uint64 { + return uint64(m.node.Generate().Int64()) +} + +// ignore setmax as we are snowflake +func (m *SnowflakeSequencer) SetMax(seenValue uint64) { +} + +// return a new id as no Peek is stored +func (m *SnowflakeSequencer) Peek() uint64 { + return uint64(m.node.Generate().Int64()) +} diff --git a/weed/server/common.go b/weed/server/common.go index 888ddec49..5c5f1b8eb 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -1,10 +1,11 @@ package weed_server import ( - "bytes" "encoding/json" "errors" "fmt" + "io" + "mime/multipart" "net/http" "path/filepath" "strconv" @@ -37,14 +38,22 @@ func init() { func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) (err error) { var bytes []byte - if r.FormValue("pretty") != "" { - bytes, err = json.MarshalIndent(obj, "", " ") - } else { - bytes, err = json.Marshal(obj) + if obj != nil { + if r.FormValue("pretty") != "" { + bytes, err = json.MarshalIndent(obj, "", " ") + } else { + bytes, err = json.Marshal(obj) + } } if err != nil { return } + + if httpStatus >= 400 { + glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s", + r.Method, r.URL.String(), httpStatus, string(bytes)) + } + callback := r.FormValue("callback") if callback == "" { w.Header().Set("Content-Type", "application/json") @@ -77,7 +86,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err) + glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err) + glog.V(1).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { @@ -90,7 +100,7 @@ func debug(params ...interface{}) { glog.V(4).Infoln(params...) 
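// Aside on the snowflake sequencer above: the node id string is folded into
// snowflake's 10-bit node space with FNV-1a, so two distinct ids can collide.
// A sketch of the derivation (hypothetical helper name):
//
//	func snowflakeNodeID(nodeid string) int64 {
//		h := fnv.New32a()
//		h.Write([]byte(nodeid))         // FNV-1a over the raw id string
//		return int64(h.Sum32() & 0x3ff) // keep the low 10 bits: 0..1023
//	}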
} -func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) { +func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption) { m := make(map[string]interface{}) if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) @@ -98,13 +108,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r) + pu, pe := needle.ParseUpload(r, 256*1024*1024) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return } - debug("assigning file id for", fname) + debug("assigning file id for", pu.FileName) r.ParseForm() count := uint64(1) if r.FormValue("count") != "" { @@ -117,32 +127,34 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st ar := &operation.VolumeAssignRequest{ Count: count, DataCenter: r.FormValue("dataCenter"), + Rack: r.FormValue("rack"), Replication: r.FormValue("replication"), Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), + DiskType: r.FormValue("disk"), } - assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar) + assignResult, ae := operation.Assign(masterFn, grpcDialOption, ar) if ae != nil { writeJsonError(w, r, http.StatusInternalServerError, ae) return } url := "http://" + assignResult.Url + "/" + assignResult.Fid - if lastModified != 0 { - url = url + "?ts=" + strconv.FormatUint(lastModified, 10) + if pu.ModifiedTime != 0 { + url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10) } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth) + uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return } - m["fileName"] = fname + m["fileName"] = pu.FileName m["fid"] = assignResult.Fid m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid - m["size"] = originalDataSize + m["size"] = pu.OriginalDataSize m["eTag"] = uploadResult.ETag writeJsonQuiet(w, r, http.StatusCreated, m) return @@ -183,19 +195,19 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b func statsHealthHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() writeJsonQuiet(w, r, http.StatusOK, m) } func statsCounterHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Counters"] = serverStats writeJsonQuiet(w, r, http.StatusOK, m) } func statsMemoryHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Memory"] = stats.MemStat() writeJsonQuiet(w, r, http.StatusOK, m) } @@ -209,3 +221,106 @@ func handleStaticResources2(r *mux.Router) { r.Handle("/favicon.ico", http.FileServer(statikFS)) r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS))) } + +func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, filename string) { + if filename != "" { + contentDisposition 
:= "inline" + if r.FormValue("dl") != "" { + if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { + contentDisposition = "attachment" + } + } + w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) + } +} + +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { + rangeReq := r.Header.Get("Range") + + if rangeReq == "" { + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + if err := writeFn(w, 0, totalSize); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + //the rest is dealing with partial content request + //mostly copy from src/pkg/net/http/fs.go + ranges, err := parseRange(rangeReq, totalSize) + if err != nil { + http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) + return + } + if sumRangesSize(ranges) > totalSize { + // The total number of bytes in all the ranges + // is larger than the size of the file by + // itself, so this is probably an attack, or a + // dumb client. Ignore the range request. + return + } + if len(ranges) == 0 { + return + } + if len(ranges) == 1 { + // RFC 2616, Section 14.16: + // "When an HTTP message includes the content of a single + // range (for example, a response to a request for a + // single range, or to a request for a set of ranges + // that overlap without any holes), this content is + // transmitted with a Content-Range header, and a + // Content-Length header showing the number of bytes + // actually transferred. + // ... + // A response to a request for a single range MUST NOT + // be sent using the multipart/byteranges media type." + ra := ranges[0] + w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) + w.Header().Set("Content-Range", ra.contentRange(totalSize)) + + err = writeFn(w, ra.start, ra.length) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + // process multiple ranges + for _, ra := range ranges { + if ra.start > totalSize { + http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize := rangesMIMESize(ranges, mimeType, totalSize) + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent := pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
+ go func() { + for _, ra := range ranges { + part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) + if e != nil { + pw.CloseWithError(e) + return + } + if e = writeFn(part, ra.start, ra.length); e != nil { + pw.CloseWithError(e) + return + } + } + mw.Close() + pw.Close() + }() + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + w.WriteHeader(http.StatusPartialContent) + if _, err := io.CopyN(w, sendContent, sendSize); err != nil { + http.Error(w, "Internal Error", http.StatusInternalServerError) + return + } +} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index a84feec2d..3821de6a9 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -6,89 +6,98 @@ import ( "os" "path/filepath" "strconv" - "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) + + entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name)) + if err == filer_pb.ErrNotFound { + return &filer_pb.LookupDirectoryEntryResponse{}, err + } if err != nil { + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } return &filer_pb.LookupDirectoryEntryResponse{ Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: entry.IsDirectory(), - Attributes: filer2.EntryAttributeToPb(entry), - Chunks: entry.Chunks, - Extended: entry.Extended, + Name: req.Name, + IsDirectory: entry.IsDirectory(), + Attributes: filer.EntryAttributeToPb(entry), + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, }, }, nil } -func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error { +func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) { + + glog.V(4).Infof("ListEntries %v", req) limit := int(req.Limit) if limit == 0 { limit = fs.option.DirListingLimit } - paginationLimit := filer2.PaginationSize + paginationLimit := filer.PaginationSize if limit < paginationLimit { paginationLimit = limit } lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom + var listErr error for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) - if err != nil { - return err - } - if len(entries) == 0 { - return nil - } - - includeLastFile = false - - for _, entry := range entries { - - lastFileName = entry.Name() - - if req.Prefix != "" { - if !strings.HasPrefix(entry.Name(), req.Prefix) { - continue - } - } - - if err := stream.Send(&filer_pb.ListEntriesResponse{ + var hasEntries bool + lastFileName, 
listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", "", func(entry *filer.Entry) bool { + hasEntries = true + if err = stream.Send(&filer_pb.ListEntriesResponse{ Entry: &filer_pb.Entry{ - Name: entry.Name(), - IsDirectory: entry.IsDirectory(), - Chunks: entry.Chunks, - Attributes: filer2.EntryAttributeToPb(entry), - Extended: entry.Extended, + Name: entry.Name(), + IsDirectory: entry.IsDirectory(), + Chunks: entry.Chunks, + Attributes: filer.EntryAttributeToPb(entry), + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, }, }); err != nil { - return err + return false } + limit-- if limit == 0 { - return nil + return false } - } + return true + }) - if len(entries) < paginationLimit { - break + if listErr != nil { + return listErr + } + if err != nil { + return err } + if !hasEntries { + return nil + } + + includeLastFile = false } @@ -126,46 +135,75 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol return resp, nil } +func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err error) { + fid, err := needle.ParseFileIdFromString(fileId) + if err != nil { + return nil, err + } + locations, found := fs.filer.MasterClient.GetLocations(uint32(fid.VolumeId)) + if !found || len(locations) == 0 { + return nil, fmt.Errorf("not found volume %d in %s", fid.VolumeId, fileId) + } + for _, loc := range locations { + targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId)) + } + return +} + func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) - chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) + glog.V(4).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name) - if req.Entry.Attributes == nil { - return nil, fmt.Errorf("can not create entry with empty attributes") - } + resp = &filer_pb.CreateEntryResponse{} - err = fs.filer.CreateEntry(ctx, &filer2.Entry{ - FullPath: fullpath, - Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), - Chunks: chunks, - }) + chunks, garbage, err2 := fs.cleanupChunks(util.Join(req.Directory, req.Entry.Name), nil, req.Entry) + if err2 != nil { + return &filer_pb.CreateEntryResponse{}, fmt.Errorf("CreateEntry cleanupChunks %s %s: %v", req.Directory, req.Entry.Name, err2) + } - if err == nil { - fs.filer.DeleteChunks(garbages) + createErr := fs.filer.CreateEntry(ctx, &filer.Entry{ + FullPath: util.JoinPath(req.Directory, req.Entry.Name), + Attr: filer.PbToEntryAttribute(req.Entry.Attributes), + Chunks: chunks, + Extended: req.Entry.Extended, + HardLinkId: filer.HardLinkId(req.Entry.HardLinkId), + HardLinkCounter: req.Entry.HardLinkCounter, + Content: req.Entry.Content, + }, req.OExcl, req.IsFromOtherCluster, req.Signatures) + + if createErr == nil { + fs.filer.DeleteChunks(garbage) + } else { + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + resp.Error = createErr.Error() } - return &filer_pb.CreateEntryResponse{}, err + return } func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)) - entry, err := fs.filer.FindEntry(ctx, 
filer2.FullPath(fullpath)) + glog.V(4).Infof("UpdateEntry %v", req) + + fullpath := util.Join(req.Directory, req.Entry.Name) + entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } - // remove old chunks if not included in the new ones - unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks) - - chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) + chunks, garbage, err2 := fs.cleanupChunks(fullpath, entry, req.Entry) + if err2 != nil { + return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2) + } - newEntry := &filer2.Entry{ - FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), - Attr: entry.Attr, - Extended: req.Entry.Extended, - Chunks: chunks, + newEntry := &filer.Entry{ + FullPath: util.JoinPath(req.Directory, req.Entry.Name), + Attr: entry.Attr, + Extended: req.Entry.Extended, + Chunks: chunks, + HardLinkId: filer.HardLinkId(req.Entry.HardLinkId), + HardLinkCounter: req.Entry.HardLinkCounter, + Content: req.Entry.Content, } glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v", @@ -188,76 +226,166 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } - if filer2.EqualEntry(entry, newEntry) { + if filer.EqualEntry(entry, newEntry) { return &filer_pb.UpdateEntryResponse{}, err } if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { - fs.filer.DeleteChunks(unusedChunks) - fs.filer.DeleteChunks(garbages) - } + fs.filer.DeleteChunks(garbage) + + fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster, req.Signatures) - fs.filer.NotifyUpdateEvent(entry, newEntry, true) + } else { + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) + } return &filer_pb.UpdateEntryResponse{}, err } -func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) - return &filer_pb.DeleteEntryResponse{}, err -} +func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) { -func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { + // remove old chunks if not included in the new ones + if existingEntry != nil { + garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks) + if err != nil { + return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err) + } + } - ttlStr := "" - if req.TtlSec > 0 { - ttlStr = strconv.Itoa(int(req.TtlSec)) + // files with manifest chunks are usually large and append only, skip calculating covered chunks + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks) + + chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks) + garbage = append(garbage, coveredChunks...) 
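
cleanupChunks first splits off manifest chunks (large, append-only files keep them untouched), then lets CompactFileChunks report which of the remaining chunks are fully shadowed by newer writes. As intuition only, not the filer's actual implementation (which also resolves chunk manifests through lookupFileId), a chunk becomes garbage when a newer chunk covers its entire byte range:

package main

import "fmt"

type chunk struct {
	fileId string
	offset int64
	size   int64
	mtime  int64 // larger means written later
}

// covered reports whether c lies entirely inside the newer chunk n.
func covered(c, n chunk) bool {
	return n.mtime > c.mtime &&
		n.offset <= c.offset &&
		c.offset+c.size <= n.offset+n.size
}

func split(chunks []chunk) (live, garbage []chunk) {
	for i, c := range chunks {
		isGarbage := false
		for j, n := range chunks {
			if i != j && covered(c, n) {
				isGarbage = true
				break
			}
		}
		if isGarbage {
			garbage = append(garbage, c)
		} else {
			live = append(live, c)
		}
	}
	return
}

func main() {
	chunks := []chunk{
		{"1,ab", 0, 1024, 1}, // fully overwritten by the next chunk
		{"2,cd", 0, 2048, 2},
	}
	live, garbage := split(chunks)
	fmt.Println(len(live), "live,", len(garbage), "garbage")
}

The garbage list is what fs.filer.DeleteChunks later removes from the volume servers.
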
+ + if newEntry.Attributes != nil { + so := fs.detectStorageOption(fullpath, + newEntry.Attributes.Collection, + newEntry.Attributes.Replication, + newEntry.Attributes.TtlSec, + newEntry.Attributes.DiskType, + "", + "", + ) + chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks) + if err != nil { + // not good, but should be ok + glog.V(0).Infof("MaybeManifestize: %v", err) + } } - var altRequest *operation.VolumeAssignRequest + chunks = append(chunks, manifestChunks...) + + return +} - dataCenter := req.DataCenter - if dataCenter == "" { - dataCenter = fs.option.DataCenter +func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) { + + glog.V(4).Infof("AppendToEntry %v", req) + + fullpath := util.NewFullPath(req.Directory, req.EntryName) + var offset int64 = 0 + entry, err := fs.filer.FindEntry(ctx, fullpath) + if err == filer_pb.ErrNotFound { + entry = &filer.Entry{ + FullPath: fullpath, + Attr: filer.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + Uid: OS_UID, + Gid: OS_GID, + }, + } + } else { + offset = int64(filer.TotalSize(entry.Chunks)) } - assignRequest := &operation.VolumeAssignRequest{ - Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, - Ttl: ttlStr, - DataCenter: dataCenter, + for _, chunk := range req.Chunks { + chunk.Offset = offset + offset += int64(chunk.Size) } - if dataCenter != "" { - altRequest = &operation.VolumeAssignRequest{ - Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, - Ttl: ttlStr, - DataCenter: "", - } + + entry.Chunks = append(entry.Chunks, req.Chunks...) + so := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, entry.DiskType, "", "") + entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks) + if err != nil { + // not good, but should be ok + glog.V(0).Infof("MaybeManifestize: %v", err) } - assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) + + err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil) + + return &filer_pb.AppendToEntryResponse{}, err +} + +func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { + + glog.V(4).Infof("DeleteEntry %v", req) + + err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures) + resp = &filer_pb.DeleteEntryResponse{} + if err != nil && err != filer_pb.ErrNotFound { + resp.Error = err.Error() + } + return resp, nil +} + +func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { + + so := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack) + + assignRequest, altRequest := so.ToAssignRequests(int(req.Count)) + + assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) if err != nil { - return nil, fmt.Errorf("assign volume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { - return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) + 
glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } return &filer_pb.AssignVolumeResponse{ - FileId: assignResult.Fid, - Count: int32(assignResult.Count), - Url: assignResult.Url, - PublicUrl: assignResult.PublicUrl, - Auth: string(assignResult.Auth), - }, err + FileId: assignResult.Fid, + Count: int32(assignResult.Count), + Url: assignResult.Url, + PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), + Collection: so.Collection, + Replication: so.Replication, + }, nil +} + +func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) { + + glog.V(4).Infof("CollectionList %v", req) + resp = &filer_pb.CollectionListResponse{} + + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + masterResp, err := client.CollectionList(context.Background(), &master_pb.CollectionListRequest{ + IncludeNormalVolumes: req.IncludeNormalVolumes, + IncludeEcVolumes: req.IncludeEcVolumes, + }) + if err != nil { + return err + } + for _, c := range masterResp.Collections { + resp.Collections = append(resp.Collections, &filer_pb.Collection{Name: c.Name}) + } + return nil + }) + + return } func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + glog.V(4).Infof("DeleteCollection %v", req) + + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ Name: req.GetCollection(), }) return err @@ -268,13 +396,23 @@ func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.Delet func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) { - input := &master_pb.StatisticsRequest{ - Replication: req.Replication, - Collection: req.Collection, - Ttl: req.Ttl, - } + var output *master_pb.StatisticsResponse + + err = fs.filer.MasterClient.WithClient(func(masterClient master_pb.SeaweedClient) error { + grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{ + Replication: req.Replication, + Collection: req.Collection, + Ttl: req.Ttl, + DiskType: req.DiskType, + }) + if grpcErr != nil { + return grpcErr + } + + output = grpcResponse + return nil + }) - output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input) if err != nil { return nil, err } @@ -288,10 +426,91 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) { - return &filer_pb.GetFilerConfigurationResponse{ - Masters: fs.option.Masters, - Collection: fs.option.Collection, - Replication: fs.option.DefaultReplication, - MaxMb: uint32(fs.option.MaxMB), - }, nil + t := &filer_pb.GetFilerConfigurationResponse{ + Masters: fs.option.Masters, + Collection: fs.option.Collection, + Replication: fs.option.DefaultReplication, + MaxMb: uint32(fs.option.MaxMB), + DirBuckets: 
fs.filer.DirBucketsPath, + Cipher: fs.filer.Cipher, + Signature: fs.filer.Signature, + MetricsAddress: fs.metricsAddress, + MetricsIntervalSec: int32(fs.metricsIntervalSec), + } + + glog.V(4).Infof("GetFilerConfiguration: %v", t) + + return t, nil +} + +func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error { + + req, err := stream.Recv() + if err != nil { + return err + } + + clientName := fmt.Sprintf("%s:%d", req.Name, req.GrpcPort) + m := make(map[string]bool) + for _, tp := range req.Resources { + m[tp] = true + } + fs.brokersLock.Lock() + fs.brokers[clientName] = m + glog.V(0).Infof("+ broker %v", clientName) + fs.brokersLock.Unlock() + + defer func() { + fs.brokersLock.Lock() + delete(fs.brokers, clientName) + glog.V(0).Infof("- broker %v: %v", clientName, err) + fs.brokersLock.Unlock() + }() + + for { + if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil { + glog.V(0).Infof("send broker %v: %+v", clientName, err) + return err + } + // println("replied") + + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("recv broker %v: %v", clientName, err) + return err + } + // println("received") + } + +} + +func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) { + + resp = &filer_pb.LocateBrokerResponse{} + + fs.brokersLock.Lock() + defer fs.brokersLock.Unlock() + + var localBrokers []*filer_pb.LocateBrokerResponse_Resource + + for b, m := range fs.brokers { + if _, found := m[req.Resource]; found { + resp.Found = true + resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{ + { + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }, + } + return + } + localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{ + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }) + } + + resp.Resources = localBrokers + + return resp, nil + } diff --git a/weed/server/filer_grpc_server_kv.go b/weed/server/filer_grpc_server_kv.go new file mode 100644 index 000000000..3cb47115e --- /dev/null +++ b/weed/server/filer_grpc_server_kv.go @@ -0,0 +1,42 @@ +package weed_server + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func (fs *FilerServer) KvGet(ctx context.Context, req *filer_pb.KvGetRequest) (*filer_pb.KvGetResponse, error) { + + value, err := fs.filer.Store.KvGet(ctx, req.Key) + if err == filer.ErrKvNotFound { + return &filer_pb.KvGetResponse{}, nil + } + + if err != nil { + return &filer_pb.KvGetResponse{Error: err.Error()}, nil + } + + return &filer_pb.KvGetResponse{ + Value: value, + }, nil + +} + +// KvPut sets the key~value. 
if empty value, delete the kv entry +func (fs *FilerServer) KvPut(ctx context.Context, req *filer_pb.KvPutRequest) (*filer_pb.KvPutResponse, error) { + + if len(req.Value) == 0 { + if err := fs.filer.Store.KvDelete(ctx, req.Key); err != nil { + return &filer_pb.KvPutResponse{Error: err.Error()}, nil + } + } + + err := fs.filer.Store.KvPut(ctx, req.Key, req.Value) + if err != nil { + return &filer_pb.KvPutResponse{Error: err.Error()}, nil + } + + return &filer_pb.KvPutResponse{}, nil + +} diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index dfa59e7fe..eadb970d5 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -3,61 +3,67 @@ package weed_server import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" + "path/filepath" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "path/filepath" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { glog.V(1).Infof("AtomicRenameEntry %v", req) + oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) + newParent := util.FullPath(filepath.ToSlash(req.NewDirectory)) + + if err := fs.filer.CanRename(oldParent, newParent); err != nil { + return nil, err + } + ctx, err := fs.filer.BeginTransaction(ctx) if err != nil { return nil, err } - oldParent := filer2.FullPath(filepath.ToSlash(req.OldDirectory)) - oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName)) if err != nil { fs.filer.RollbackTransaction(ctx) return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) } - var events MoveEvents - moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, newParent, req.NewName) if moveErr != nil { fs.filer.RollbackTransaction(ctx) - return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err) + return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr) } else { if commitError := fs.filer.CommitTransaction(ctx); commitError != nil { fs.filer.RollbackTransaction(ctx) - return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, err) + return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, commitError) } } - for _, entry := range events.newEntries { - fs.filer.NotifyUpdateEvent(nil, entry, false) - } - for _, entry := range events.oldEntries { - fs.filer.NotifyUpdateEvent(entry, nil, false) - } - return &filer_pb.AtomicRenameEntryResponse{}, nil } -func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { - if entry.IsDirectory() { - if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { - return err +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error { + + if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, func() error { + if entry.IsDirectory() { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName); err != nil { + return err + } } + return nil + }); err != nil { + 
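
The reworked rename threads a moveFolderSubEntries callback through moveSelfEntry, which pins the order of operations: create the entry at the new path, recursively move children, and delete the old entry only at the end, so an interrupted rename never strands children without a parent. A toy in-memory sketch of that ordering (the types are illustrative, not the filer store API):

package main

import (
	"fmt"
	"path"
)

type node struct {
	isDir    bool
	children map[string]*node
}

// move relocates oldPath to newPath in a flat path->node index.
func move(index map[string]*node, oldPath, newPath string) {
	n := index[oldPath]
	// 1. create the entry at its new location first
	index[newPath] = n
	// 2. then recursively move the children of a directory
	if n.isDir {
		for name := range n.children {
			move(index, path.Join(oldPath, name), path.Join(newPath, name))
		}
	}
	// 3. delete the old entry only after every child has moved
	delete(index, oldPath)
}

func main() {
	f := &node{}
	dir := &node{isDir: true, children: map[string]*node{"f": f}}
	index := map[string]*node{"/a": dir, "/a/f": f}
	move(index, "/a", "/b")
	for p := range index {
		fmt.Println(p) // prints /b and /b/f
	}
}
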
return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err) } - return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events) + + return nil } -func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error { currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) @@ -68,7 +74,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer includeLastFile := false for { - entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "", "") if err != nil { return err } @@ -78,19 +84,19 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer for _, item := range entries { lastFileName = item.Name() // println("processing", lastFileName) - err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name()) if err != nil { return err } } - if len(entries) < 1024 { + if !hasMore { break } } return nil } -func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error) error { oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) @@ -102,29 +108,30 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP } // add to new directory - newEntry := &filer2.Entry{ + newEntry := &filer.Entry{ FullPath: newPath, Attr: entry.Attr, Chunks: entry.Chunks, + Extended: entry.Extended, + Content: entry.Content, } - createErr := fs.filer.CreateEntry(ctx, newEntry) + createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, nil) if createErr != nil { return createErr } + if moveFolderSubEntries != nil { + if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil { + return moveChildrenErr + } + } + // delete old entry - deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false) + deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, nil) if deleteErr != nil { return deleteErr } - events.oldEntries = append(events.oldEntries, entry) - events.newEntries = append(events.newEntries, newEntry) return nil } - -type MoveEvents struct { - oldEntries []*filer2.Entry - newEntries []*filer2.Entry -} diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go new file mode 100644 index 000000000..d9f91b125 --- /dev/null +++ b/weed/server/filer_grpc_server_sub_meta.go @@ -0,0 +1,202 @@ +package weed_server + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" 
+) + +func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error { + + peerAddress := findClientAddress(stream.Context(), 0) + + clientName := fs.addClient(req.ClientName, peerAddress) + + defer fs.deleteClient(clientName) + + lastReadTime := time.Unix(0, req.SinceNs) + glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName, req.Signature) + + eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn) + + var processedTsNs int64 + var err error + + for { + + processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn) + if err != nil { + return fmt.Errorf("reading from persisted logs: %v", err) + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + lastReadTime, err = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool { + fs.filer.MetaAggregator.ListenersLock.Lock() + fs.filer.MetaAggregator.ListenersCond.Wait() + fs.filer.MetaAggregator.ListenersLock.Unlock() + return true + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + time.Sleep(3127 * time.Millisecond) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error { + + peerAddress := findClientAddress(stream.Context(), 0) + + clientName := fs.addClient(req.ClientName, peerAddress) + + defer fs.deleteClient(clientName) + + lastReadTime := time.Unix(0, req.SinceNs) + glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName, req.Signature) + + eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn) + + var processedTsNs int64 + var err error + + for { + // println("reading from persisted logs ...") + processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn) + if err != nil { + return fmt.Errorf("reading from persisted logs: %v", err) + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + // glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + // println("reading from in memory logs ...") + + lastReadTime, err = fs.filer.LocalMetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool { + fs.listenersLock.Lock() + fs.listenersCond.Wait() + fs.listenersLock.Unlock() + return true + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + time.Sleep(3127 * time.Millisecond) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error { + return func(logEntry *filer_pb.LogEntry) error { + event := &filer_pb.SubscribeMetadataResponse{} + if err := proto.Unmarshal(logEntry.Data, event); err != nil { + glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + } + 
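
SubscribeMetadata replays persisted log entries first (ReadPersistedLogBuffer), then tails the in-memory log buffer, waking on the listeners condition variable whenever a new event is appended. A minimal subscriber sketch against the gRPC API shown here; the address, client name, and path prefix are assumptions:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure()) // assumed filer gRPC address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := filer_pb.NewSeaweedFilerClient(conn).SubscribeMetadata(context.Background(),
		&filer_pb.SubscribeMetadataRequest{
			ClientName: "example-subscriber",
			PathPrefix: "/buckets/",
			SinceNs:    time.Now().Add(-time.Hour).UnixNano(), // replay the last hour
		})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.Directory, resp.EventNotification)
	}
}

The Signatures field in each event is what prevents loops: a subscriber that also writes back can pass its own signature and the server will skip events it originated.
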
+ if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil { + return err + } + + return nil + } +} + +func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer, clientName string, clientSignature int32) func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error { + return func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error { + + foundSelf := false + for _, sig := range eventNotification.Signatures { + if sig == clientSignature && clientSignature != 0 { + return nil + } + if sig == fs.filer.Signature { + foundSelf = true + } + } + if !foundSelf { + eventNotification.Signatures = append(eventNotification.Signatures, fs.filer.Signature) + } + + // get complete path to the file or directory + var entryName string + if eventNotification.OldEntry != nil { + entryName = eventNotification.OldEntry.Name + } else if eventNotification.NewEntry != nil { + entryName = eventNotification.NewEntry.Name + } + + fullpath := util.Join(dirPath, entryName) + + // skip on filer internal meta logs + if strings.HasPrefix(fullpath, filer.SystemLogDir) { + return nil + } + + if !strings.HasPrefix(fullpath, req.PathPrefix) { + if eventNotification.NewParentPath != "" { + newFullPath := util.Join(eventNotification.NewParentPath, entryName) + if !strings.HasPrefix(newFullPath, req.PathPrefix) { + return nil + } + } else { + return nil + } + } + + message := &filer_pb.SubscribeMetadataResponse{ + Directory: dirPath, + EventNotification: eventNotification, + TsNs: tsNs, + } + // println("sending", dirPath, entryName) + if err := stream.Send(message); err != nil { + glog.V(0).Infof("=> client %v: %+v", clientName, err) + return err + } + return nil + } +} + +func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ listener %v", clientName) + return +} + +func (fs *FilerServer) deleteClient(clientName string) { + glog.V(0).Infof("- listener %v", clientName) +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 41ba81366..2734223ea 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -5,23 +5,35 @@ import ( "fmt" "net/http" "os" + "sync" "time" + "github.com/chrislusf/seaweedfs/weed/stats" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/filer2" - _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" - _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2" - _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" - _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" - _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" - _ "github.com/chrislusf/seaweedfs/weed/filer2/tikv" + "github.com/chrislusf/seaweedfs/weed/filer" + _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7" + _ "github.com/chrislusf/seaweedfs/weed/filer/etcd" + _ "github.com/chrislusf/seaweedfs/weed/filer/hbase" + _ 
"github.com/chrislusf/seaweedfs/weed/filer/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3" + _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" @@ -30,46 +42,74 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type FilerOption struct { - Masters []string - Collection string - DefaultReplication string - RedirectOnRead bool - DisableDirListing bool - MaxMB int - DirListingLimit int - DataCenter string - DefaultLevelDbDir string - DisableHttp bool - Port int + Masters []string + Collection string + DefaultReplication string + DisableDirListing bool + MaxMB int + DirListingLimit int + DataCenter string + Rack string + DefaultLevelDbDir string + DisableHttp bool + Host string + Port uint32 + recursiveDelete bool + Cipher bool + SaveToFilerLimit int64 + Filers []string + ConcurrentUploadLimit int64 } type FilerServer struct { option *FilerOption secret security.SigningKey - filer *filer2.Filer + filer *filer.Filer grpcDialOption grpc.DialOption + + // metrics read from the master + metricsAddress string + metricsIntervalSec int + + // notifying clients + listenersLock sync.Mutex + listenersCond *sync.Cond + + brokers map[string]map[string]bool + brokersLock sync.Mutex + + inFlightDataSize int64 + inFlightDataLimitCond *sync.Cond } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { fs = &FilerServer{ - option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), + brokers: make(map[string]map[string]bool), + inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)), } + fs.listenersCond = sync.NewCond(&fs.listenersLock) if len(option.Masters) == 0 { glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) + fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, option.DataCenter, func() { + fs.listenersCond.Broadcast() + }) + fs.filer.Cipher = option.Cipher + + fs.checkWithMaster() + go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), fs.metricsAddress, fs.metricsIntervalSec) go fs.filer.KeepConnectedToMaster() - v := viper.GetViper() + v := util.GetViper() if !util.LoadConfiguration("filer", false) { v.Set("leveldb2.enabled", true) v.Set("leveldb2.dir", option.DefaultLevelDbDir) @@ -77,56 +117,73 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) if os.IsNotExist(err) { os.MkdirAll(option.DefaultLevelDbDir, 0755) } + glog.V(0).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir) + } else { + glog.Warningf("skipping default store dir in %s", option.DefaultLevelDbDir) } util.LoadConfiguration("notification", false) + 
fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") + v.SetDefault("filer.options.buckets_folder", "/buckets") + fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder") + // TODO deprecated, will be be removed after 2020-12-31 + // replaced by https://github.com/chrislusf/seaweedfs/wiki/Path-Specific-Configuration + fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync") fs.filer.LoadConfiguration(v) - notification.LoadConfiguration(v.Sub("notification")) + notification.LoadConfiguration(v, "notification.") handleStaticResources(defaultMux) if !option.DisableHttp { defaultMux.HandleFunc("/", fs.filerHandler) } if defaultMux != readonlyMux { + handleStaticResources(readonlyMux) readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } - maybeStartMetrics(fs, option) + fs.filer.AggregateFromPeers(fmt.Sprintf("%s:%d", option.Host, option.Port), option.Filers) + + fs.filer.LoadBuckets() + + fs.filer.LoadFilerConf() + + grace.OnInterrupt(func() { + fs.filer.Shutdown() + }) return fs, nil } -func maybeStartMetrics(fs *FilerServer, option *FilerOption) { +func (fs *FilerServer) checkWithMaster() { + + for _, master := range fs.option.Masters { + _, err := pb.ParseServerToGrpcAddress(master) + if err != nil { + glog.Fatalf("invalid master address %s: %v", master, err) + } + } + isConnected := false - var metricsAddress string - var metricsIntervalSec int - var readErr error for !isConnected { - metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, option.Masters[0]) - if readErr == nil { - isConnected = true - } else { - time.Sleep(7 * time.Second) + for _, master := range fs.option.Masters { + readErr := operation.WithMasterServerClient(master, fs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", master, err) + } + fs.metricsAddress, fs.metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) + if fs.option.DefaultReplication == "" { + fs.option.DefaultReplication = resp.DefaultReplication + } + return nil + }) + if readErr == nil { + isConnected = true + } else { + time.Sleep(7 * time.Second) + } } } - if metricsAddress == "" && metricsIntervalSec <= 0 { - return - } - go stats.LoopPushingMetric("filer", stats.SourceName(option.Port), stats.FilerGather, - func() (addr string, intervalSeconds int) { - return metricsAddress, metricsIntervalSec - }) -} -func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) { - err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) - if err != nil { - return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err) - } - metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) - return nil - }) - return } diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index b6bfc3b04..ed6bbb6f6 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -1,14 +1,36 @@ package weed_server import ( + "github.com/chrislusf/seaweedfs/weed/util" "net/http" + "strings" + "sync/atomic" "time" 
"github.com/chrislusf/seaweedfs/weed/stats" ) func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + // proxy to volume servers + var fileId string + if strings.HasPrefix(r.RequestURI, "/?proxyChunkId=") { + fileId = r.RequestURI[len("/?proxyChunkId="):] + } + if fileId != "" { + stats.FilerRequestCounter.WithLabelValues("proxy").Inc() + fs.proxyToVolumeServer(w, r, fileId) + stats.FilerRequestHistogram.WithLabelValues("proxy").Observe(time.Since(start).Seconds()) + return + } + + w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } switch r.Method { case "GET": stats.FilerRequestCounter.WithLabelValues("get").Inc() @@ -20,20 +42,53 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) case "DELETE": stats.FilerRequestCounter.WithLabelValues("delete").Inc() - fs.DeleteHandler(w, r) + if _, ok := r.URL.Query()["tagging"]; ok { + fs.DeleteTaggingHandler(w, r) + } else { + fs.DeleteHandler(w, r) + } stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) - case "PUT": - stats.FilerRequestCounter.WithLabelValues("put").Inc() - fs.PostHandler(w, r) - stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) - case "POST": - stats.FilerRequestCounter.WithLabelValues("post").Inc() - fs.PostHandler(w, r) - stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) + case "POST", "PUT": + + // wait until in flight data is less than the limit + contentLength := getContentLength(r) + fs.inFlightDataLimitCond.L.Lock() + for atomic.LoadInt64(&fs.inFlightDataSize) > fs.option.ConcurrentUploadLimit { + fs.inFlightDataLimitCond.Wait() + } + atomic.AddInt64(&fs.inFlightDataSize, contentLength) + fs.inFlightDataLimitCond.L.Unlock() + defer func() { + atomic.AddInt64(&fs.inFlightDataSize, -contentLength) + fs.inFlightDataLimitCond.Signal() + }() + + if r.Method == "PUT" { + stats.FilerRequestCounter.WithLabelValues("put").Inc() + if _, ok := r.URL.Query()["tagging"]; ok { + fs.PutTaggingHandler(w, r) + } else { + fs.PostHandler(w, r, contentLength) + } + stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) + } else { // method == "POST" + stats.FilerRequestCounter.WithLabelValues("post").Inc() + fs.PostHandler(w, r, contentLength) + stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) + } + case "OPTIONS": + stats.FilerRequestCounter.WithLabelValues("options").Inc() + OptionsHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) } } func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } start := time.Now() switch r.Method { case "GET": @@ -44,5 +99,18 @@ func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Reque stats.FilerRequestCounter.WithLabelValues("head").Inc() fs.GetOrHeadHandler(w, r, false) stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) + case "OPTIONS": + 
stats.FilerRequestCounter.WithLabelValues("options").Inc() + OptionsHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) + } +} + +func OptionsHandler(w http.ResponseWriter, r *http.Request, isReadOnly bool) { + if isReadOnly { + w.Header().Add("Access-Control-Allow-Methods", "GET, OPTIONS") + } else { + w.Header().Add("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") } + w.Header().Add("Access-Control-Allow-Headers", "*") } diff --git a/weed/server/filer_server_handlers_proxy.go b/weed/server/filer_server_handlers_proxy.go new file mode 100644 index 000000000..b8b28790b --- /dev/null +++ b/weed/server/filer_server_handlers_proxy.go @@ -0,0 +1,67 @@ +package weed_server + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "math/rand" + "net/http" +) + +var ( + client *http.Client +) + +func init() { + client = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + }} +} + +func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Request, fileId string) { + + urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(fileId) + if err != nil { + glog.Errorf("locate %s: %v", fileId, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + if len(urlStrings) == 0 { + w.WriteHeader(http.StatusNotFound) + return + } + + proxyReq, err := http.NewRequest(r.Method, urlStrings[rand.Intn(len(urlStrings))], r.Body) + if err != nil { + glog.Errorf("NewRequest %s: %v", urlStrings[0], err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + proxyReq.Header.Set("Host", r.Host) + proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) + + for header, values := range r.Header { + for _, value := range values { + proxyReq.Header.Add(header, value) + } + } + + proxyResponse, postErr := client.Do(proxyReq) + + if postErr != nil { + glog.Errorf("post to filer: %v", postErr) + w.WriteHeader(http.StatusInternalServerError) + return + } + defer util.CloseResponse(proxyResponse) + + for k, v := range proxyResponse.Header { + w.Header()[k] = v + } + w.WriteHeader(proxyResponse.StatusCode) + io.Copy(w, proxyResponse.Body) + +} diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index ba21298ba..6bc09e953 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -1,19 +1,22 @@ package weed_server import ( + "bytes" "context" "io" - "io/ioutil" "mime" - "mime/multipart" "net/http" "net/url" - "path" + "path/filepath" "strconv" "strings" + "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/images" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -26,13 +29,13 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) return } - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { glog.V(1).Infof("Not found %s: %v", path, err) 
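
The new proxy path means a URL of the form /?proxyChunkId=<fileId> makes the filer look up the chunk's volume servers and relay the request itself, so a client only ever needs network access to the filer. A hedged client sketch; the filer address and file id are placeholders:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	fileId := "3,01637037d6" // hypothetical chunk file id
	resp, err := http.Get("http://localhost:8888/?proxyChunkId=" + fileId)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
	io.Copy(os.Stdout, resp.Body) // raw chunk bytes, relayed by the filer
}
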
stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc() w.WriteHeader(http.StatusNotFound) @@ -58,196 +61,107 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, return } - if len(entry.Chunks) == 0 { - glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr) - stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc() - w.WriteHeader(http.StatusNoContent) - return - } - w.Header().Set("Accept-Ranges", "bytes") - if r.Method == "HEAD" { - w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) - w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) - setEtag(w, filer2.ETag(entry.Chunks)) - return - } - - if len(entry.Chunks) == 1 { - fs.handleSingleChunk(w, r, entry) - return - } - - fs.handleMultipleChunks(w, r, entry) - -} - -func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - - fileId := entry.Chunks[0].GetFileIdString() - - urlString, err := fs.filer.MasterClient.LookupFileId(fileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) - w.WriteHeader(http.StatusNotFound) - return - } - - if fs.option.RedirectOnRead { - stats.FilerRequestCounter.WithLabelValues("redirect").Inc() - http.Redirect(w, r, urlString, http.StatusFound) - return - } - - u, _ := url.Parse(urlString) - q := u.Query() - for key, values := range r.URL.Query() { - for _, value := range values { - q.Add(key, value) - } - } - u.RawQuery = q.Encode() - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - glog.V(3).Infoln("retrieving from", u) - resp, do_err := util.Do(request) - if do_err != nil { - glog.V(0).Infoln("failing to connect to volume server", do_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, do_err) - return - } - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - for k, v := range resp.Header { - w.Header()[k] = v - } - if entry.Attr.Mime != "" { - w.Header().Set("Content-Type", entry.Attr.Mime) - } - w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) -} - -func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { + // mime type mimeType := entry.Attr.Mime if mimeType == "" { - if ext := path.Ext(entry.Name()); ext != "" { + if ext := filepath.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - setEtag(w, filer2.ETag(entry.Chunks)) - totalSize := int64(filer2.TotalSize(entry.Chunks)) + // if modified since + if !entry.Attr.Mtime.IsZero() { + w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat)) + if r.Header.Get("If-Modified-Since") != "" { + if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil { + if !t.Before(entry.Attr.Mtime) { + w.WriteHeader(http.StatusNotModified) + return + } + } + } + } - rangeReq := r.Header.Get("Range") + // print out the header from extended properties + for k, v := range entry.Extended { + w.Header().Set(k, string(v)) + } - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - 
return + //Seaweed custom header are not visible to Vue or javascript + seaweedHeaders := []string{} + for header, _ := range w.Header() { + if strings.HasPrefix(header, "Seaweed-") { + seaweedHeaders = append(seaweedHeaders, header) } - return } + seaweedHeaders = append(seaweedHeaders, "Content-Disposition") + w.Header().Set("Access-Control-Expose-Headers", strings.Join(seaweedHeaders, ",")) - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return + //set tag count + if r.Method == "GET" { + tagCount := 0 + for k := range entry.Extended { + if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") { + tagCount++ + } + } + if tagCount > 0 { + w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount)) + } } - if len(ranges) == 0 { + + // set etag + etag := filer.ETagEntry(entry) + if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" { + w.WriteHeader(http.StatusNotModified) return } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - - err = fs.writeContent(w, entry, ra.start, int(ra.length)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + setEtag(w, etag) + + filename := entry.Name() + filename = url.QueryEscape(filename) + adjustHeaderContentDisposition(w, r, filename) + + totalSize := int64(entry.Size()) + + if r.Method == "HEAD" { + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true) + }) return } - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
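
Before any bytes are streamed, the read path now answers conditional requests: Last-Modified against If-Modified-Since, and the entry's ETag against If-None-Match. The ETag check in isolation, as a runnable sketch (the etag literal stands in for filer.ETagEntry(entry)):

package main

import (
	"fmt"
	"log"
	"net/http"
)

func serve(w http.ResponseWriter, r *http.Request) {
	etag := "abc123" // would come from filer.ETagEntry(entry)
	if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
		w.WriteHeader(http.StatusNotModified) // client cache is still valid
		return
	}
	w.Header().Set("ETag", "\""+etag+"\"")
	fmt.Fprintln(w, "body")
}

func main() {
	http.HandleFunc("/", serve)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
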
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil { - pw.CloseWithError(e) + if rangeReq := r.Header.Get("Range"); rangeReq == "" { + ext := filepath.Ext(filename) + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + data, err := filer.ReadAll(fs.filer.MasterClient, entry.Chunks) + if err != nil { + glog.Errorf("failed to read %s: %v", path, err) + w.WriteHeader(http.StatusNotModified) return } + rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode) + io.Copy(w, rs) + return } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - if _, err := io.CopyN(w, sendContent, sendSize); err != nil { - http.Error(w, "Internal Error", http.StatusInternalServerError) - return } -} - -func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error { - - return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size) + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if offset+size <= int64(len(entry.Content)) { + _, err := writer.Write(entry.Content[offset : offset+size]) + if err != nil { + glog.Errorf("failed to write entry content: %v", err) + } + return err + } + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false) + }) } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 87e864559..307c411b6 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -2,14 +2,17 @@ package weed_server import ( "context" + "encoding/base64" + "fmt" + "github.com/skip2/go-qrcode" "net/http" "strconv" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) // listDirectoryHandler lists directories and folers under a directory @@ -31,8 +34,10 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque } lastFileName := r.FormValue("lastFileName") + namePattern := r.FormValue("namePattern") + namePatternExclude := r.FormValue("namePatternExclude") - entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) + entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) @@ -40,7 +45,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque return } - shouldDisplayLoadMore := len(entries) == limit if path == "/" { path = "" } @@ -65,21 +69,30 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName, shouldDisplayLoadMore, }) - } else { - ui.StatusTpl.Execute(w, struct { - Path string - Breadcrumbs []ui.Breadcrumb - Entries interface{} - Limit int - LastFileName string - ShouldDisplayLoadMore bool - }{ - path, - ui.ToBreadcrumb(path), 
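
Small files can now live inline in entry.Content, so the range callback first tries to serve the requested window straight from memory and only falls back to filer.StreamContent for chunk-backed data. A simplified sketch of that dispatch, with illustrative names:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// writeRange serves a byte range from in-entry content when possible and
// otherwise defers to the chunk streamer, mirroring the handler's fallback.
func writeRange(w io.Writer, content []byte, streamChunks func(io.Writer, int64, int64) error, offset, size int64) error {
	if offset+size <= int64(len(content)) {
		_, err := w.Write(content[offset : offset+size])
		return err
	}
	return streamChunks(w, offset, size)
}

func main() {
	chunkData := []byte("0123456789") // stands in for chunk-backed data
	stream := func(w io.Writer, off, size int64) error {
		_, err := io.Copy(w, bytes.NewReader(chunkData[off:off+size]))
		return err
	}
	writeRange(os.Stdout, []byte("hello"), stream, 0, 5) // served from memory
	fmt.Println()
	writeRange(os.Stdout, nil, stream, 0, 10) // falls back to streaming
	fmt.Println()
}
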
- entries, - limit, - lastFileName, - shouldDisplayLoadMore, - }) + return + } + + var qrImageString string + img, err := qrcode.Encode(fmt.Sprintf("http://%s:%d%s", fs.option.Host, fs.option.Port, r.URL.Path), qrcode.Medium, 128) + if err == nil { + qrImageString = base64.StdEncoding.EncodeToString(img) } + + ui.StatusTpl.Execute(w, struct { + Path string + Breadcrumbs []ui.Breadcrumb + Entries interface{} + Limit int + LastFileName string + ShouldDisplayLoadMore bool + QrImage string + }{ + path, + ui.ToBreadcrumb(path), + entries, + limit, + lastFileName, + shouldDisplayLoadMore, + qrImageString, + }) } diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go new file mode 100644 index 000000000..50b3a2c06 --- /dev/null +++ b/weed/server/filer_server_handlers_tagging.go @@ -0,0 +1,102 @@ +package weed_server + +import ( + "context" + "net/http" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// add or replace one file Seaweed- prefixed attributes +// curl -X PUT -H "Seaweed-Name1: value1" http://localhost:8888/path/to/a/file?tagging +func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) { + + ctx := context.Background() + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + if existingEntry == nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + + if existingEntry.Extended == nil { + existingEntry.Extended = make(map[string][]byte) + } + + for header, values := range r.Header { + if strings.HasPrefix(header, needle.PairNamePrefix) { + for _, value := range values { + existingEntry.Extended[header] = []byte(value) + } + } + } + + if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil { + glog.V(0).Infof("failing to update %s tagging : %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, err) + return + } + + writeJsonQuiet(w, r, http.StatusAccepted, nil) + return +} + +// remove all Seaweed- prefixed attributes +// curl -X DELETE http://localhost:8888/path/to/a/file?tagging +func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Request) { + + ctx := context.Background() + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + if existingEntry == nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + + if existingEntry.Extended == nil { + existingEntry.Extended = make(map[string][]byte) + } + + hasDeletion := false + for header, _ := range existingEntry.Extended { + if strings.HasPrefix(header, needle.PairNamePrefix) { + delete(existingEntry.Extended, header) + hasDeletion = true + } + } + + if !hasDeletion { + writeJsonQuiet(w, r, http.StatusNotModified, nil) + return + } + + if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil { + glog.V(0).Infof("failing to delete %s tagging : %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, err) + return + } + + writeJsonQuiet(w, r, http.StatusAccepted, nil) + return +} diff --git a/weed/server/filer_server_handlers_write.go 
b/weed/server/filer_server_handlers_write.go index 236e7027d..95eba9d3d 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -2,26 +2,17 @@ package weed_server import ( "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" "net/http" - "net/url" "os" - filenamePath "path" - "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -32,271 +23,130 @@ var ( type FilerPostResult struct { Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` + Size int64 `json:"size,omitempty"` Error string `json:"error,omitempty"` Fid string `json:"fid,omitempty"` Url string `json:"url,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) { +func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, urlLocation string, auth security.EncodedJwt, err error) { stats.FilerRequestCounter.WithLabelValues("assign").Inc() start := time.Now() defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }() - ar := &operation.VolumeAssignRequest{ - Count: 1, - Replication: replication, - Collection: collection, - Ttl: r.URL.Query().Get("ttl"), - DataCenter: dataCenter, - } - var altRequest *operation.VolumeAssignRequest - if dataCenter != "" { - altRequest = &operation.VolumeAssignRequest{ - Count: 1, - Replication: replication, - Collection: collection, - Ttl: r.URL.Query().Get("ttl"), - DataCenter: "", - } - } + ar, altRequest := so.ToAssignRequests(1) - assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest) + assignResult, ae := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, ar, altRequest) if ae != nil { glog.Errorf("failing to assign a file id: %v", ae) - writeJsonError(w, r, http.StatusInternalServerError, ae) err = ae return } fileId = assignResult.Fid urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid + if so.Fsync { + urlLocation += "?fsync=true" + } auth = assignResult.Auth return } -func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { +func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, contentLength int64) { ctx := context.Background() query := r.URL.Query() - replication := query.Get("replication") - if replication == "" { - replication = fs.option.DefaultReplication - } - collection := query.Get("collection") - if collection == "" { - collection = fs.option.Collection - } - dataCenter := query.Get("dataCenter") - if dataCenter == "" { - dataCenter = fs.option.DataCenter - } + so := fs.detectStorageOption0(r.RequestURI, + query.Get("collection"), + query.Get("replication"), + query.Get("ttl"), + query.Get("disk"), + query.Get("dataCenter"), + query.Get("rack"), + ) + + fs.autoChunk(ctx, w, r, contentLength, so) + util.CloseRequest(r) - if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked { - return - } +} - fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, 
dataCenter) +// curl -X DELETE http://localhost:8888/path/to +// curl -X DELETE http://localhost:8888/path/to?recursive=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true +func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { - if err != nil || fileId == "" || urlLocation == "" { - glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) - return + isRecursive := r.FormValue("recursive") == "true" + if !isRecursive && fs.option.recursiveDelete { + if r.FormValue("recursive") != "false" { + isRecursive = true + } } + ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" + skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" - glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) - - u, _ := url.Parse(urlLocation) - - // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off - // because they need to provide FIDs instead of file paths... - cm, _ := strconv.ParseBool(query.Get("cm")) - if cm { - q := u.Query() - q.Set("cm", "true") - u.RawQuery = q.Encode() + objectPath := r.URL.Path + if len(r.URL.Path) > 1 && strings.HasSuffix(objectPath, "/") { + objectPath = objectPath[0 : len(objectPath)-1] } - glog.V(4).Infoln("post to", u) - ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil) if err != nil { + glog.V(1).Infoln("deleting", objectPath, ":", err.Error()) + httpStatus := http.StatusInternalServerError + if err == filer_pb.ErrNotFound { + httpStatus = http.StatusNoContent + } + writeJsonError(w, r, httpStatus, err) return } - if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId); err != nil { - return - } - - // send back post result - reply := FilerPostResult{ - Name: ret.Name, - Size: ret.Size, - Error: ret.Error, - Fid: fileId, - Url: urlLocation, - } - setEtag(w, ret.ETag) - writeJsonQuiet(w, r, http.StatusCreated, reply) + w.WriteHeader(http.StatusNoContent) } -// update metadata in filer store -func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, - replication string, collection string, ret operation.UploadResult, fileId string) (err error) { - - stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds()) - }() +func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication string, ttlSeconds int32, diskType string, dataCenter, rack string) *operation.StorageOption { + collection := util.Nvl(qCollection, fs.option.Collection) + replication := util.Nvl(qReplication, fs.option.DefaultReplication) - modeStr := r.URL.Query().Get("mode") - if modeStr == "" { - modeStr = "0660" + // required by buckets folder + bucketDefaultReplication, fsync := "", false + if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") { + collection = fs.filer.DetectBucket(util.FullPath(requestURI)) + bucketDefaultReplication, fsync = fs.filer.ReadBucketOption(collection) } - mode, err := strconv.ParseUint(modeStr, 8, 32) - if err != nil { - glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) - mode = 0660 - } - 
- path := r.URL.Path - if strings.HasSuffix(path, "/") { - if ret.Name != "" { - path += ret.Name - } - } - existingEntry, err := fs.filer.FindEntry(ctx, filer2.FullPath(path)) - crTime := time.Now() - if err == nil && existingEntry != nil { - crTime = existingEntry.Crtime - } - entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), - Attr: filer2.Attr{ - Mtime: time.Now(), - Crtime: crTime, - Mode: os.FileMode(mode), - Uid: OS_UID, - Gid: OS_GID, - Replication: replication, - Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), - }, - Chunks: []*filer_pb.FileChunk{{ - FileId: fileId, - Size: uint64(ret.Size), - Mtime: time.Now().UnixNano(), - ETag: ret.ETag, - }}, - } - if ext := filenamePath.Ext(path); ext != "" { - entry.Attr.Mime = mime.TypeByExtension(ext) - } - // glog.V(4).Infof("saving %s => %+v", path, entry) - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.Chunks) - glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) - writeJsonError(w, r, http.StatusInternalServerError, dbErr) - err = dbErr - return + if replication == "" { + replication = bucketDefaultReplication } - return nil -} - -// send request to volume server -func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) { - - stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() - start := time.Now() - defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() + rule := fs.filer.FilerConf.MatchStorageRule(requestURI) - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - if auth != "" { - request.Header.Set("Authorization", "BEARER "+string(auth)) - } - resp, doErr := util.Do(request) - if doErr != nil { - glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, doErr, r.Method) - writeJsonError(w, r, http.StatusInternalServerError, doErr) - err = doErr - return - } - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - etag := resp.Header.Get("ETag") - respBody, raErr := ioutil.ReadAll(resp.Body) - if raErr != nil { - glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error()) - writeJsonError(w, r, http.StatusInternalServerError, raErr) - err = raErr - return - } - glog.V(4).Infoln("post result", string(respBody)) - unmarshalErr := json.Unmarshal(respBody, &ret) - if unmarshalErr != nil { - glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(respBody)) - writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr) - err = unmarshalErr - return - } - if ret.Error != "" { - err = errors.New(ret.Error) - glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error) - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } - // find correct final path - path := r.URL.Path - if strings.HasSuffix(path, "/") { - if ret.Name != "" { - path += ret.Name - } else { - err = fmt.Errorf("can not to write to folder %s without a file name", path) - fs.filer.DeleteFileByFileId(fileId) - glog.V(0).Infoln("Can not to write to folder", path, "without a file name!") - writeJsonError(w, r, http.StatusInternalServerError, err) - return + if ttlSeconds == 0 { + ttl, err := 
needle.ReadTTL(rule.GetTtl()) + if err != nil { + glog.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err) } + ttlSeconds = int32(ttl.Minutes()) * 60 } - if etag != "" { - ret.ETag = etag + + return &operation.StorageOption{ + Replication: util.Nvl(replication, rule.Replication), + Collection: util.Nvl(collection, rule.Collection), + DataCenter: util.Nvl(dataCenter, fs.option.DataCenter), + Rack: util.Nvl(rack, fs.option.Rack), + TtlSeconds: ttlSeconds, + DiskType: util.Nvl(diskType, rule.DiskType), + Fsync: fsync || rule.Fsync, + VolumeGrowthCount: rule.VolumeGrowthCount, } - return } -// curl -X DELETE http://localhost:8888/path/to -// curl -X DELETE http://localhost:8888/path/to?recursive=true -// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true -// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true -func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { - - isRecursive := r.FormValue("recursive") == "true" - ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" - skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" +func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplication string, qTtl string, diskType string, dataCenter, rack string) *operation.StorageOption { - err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion) + ttl, err := needle.ReadTTL(qTtl) if err != nil { - glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) - httpStatus := http.StatusInternalServerError - if err == filer2.ErrNotFound { - httpStatus = http.StatusNotFound - } - writeJsonError(w, r, httpStatus, err) - return + glog.Errorf("fail to parse ttl %s: %v", qTtl, err) } - w.WriteHeader(http.StatusNoContent) + return fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, diskType, dataCenter, rack) } diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 8ff7ab2c0..c4f10d94e 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,31 +1,27 @@ package weed_server import ( - "bytes" "context" + "fmt" "io" - "io/ioutil" "net/http" + "os" "path" "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/security" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - replication string, collection string, dataCenter string) bool { - if r.Method != "POST" { - glog.V(4).Infoln("AutoChunking not supported for method", r.Method) - return false - } +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, so *operation.StorageOption) { // autoChunking can be set at the command-line level or as a query param. 
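
For paths under the buckets directory, `detectStorageOption` takes the collection from the bucket name outright, while replication settles through a fallback chain: query parameter first, then the server-wide default, then the bucket's own default, then the matching filer.conf rule. TTL strings are parsed with `needle.ReadTTL` and flattened to whole minutes before landing in `TtlSeconds`. A standalone sketch of the replication precedence, with `nvl` standing in for `util.Nvl`:

```go
package main

import "fmt"

// nvl mirrors util.Nvl: the first non-empty string wins.
func nvl(values ...string) string {
	for _, v := range values {
		if v != "" {
			return v
		}
	}
	return ""
}

// resolveReplication models the order the hunk applies for replication:
// query parameter, then the server default, then the bucket default,
// then the filer.conf rule that matches the path.
func resolveReplication(query, serverDefault, bucketDefault, rule string) string {
	return nvl(query, serverDefault, bucketDefault, rule)
}

func main() {
	fmt.Println(resolveReplication("", "", "001", "010"))      // "001": bucket default kicks in
	fmt.Println(resolveReplication("200", "000", "001", "010")) // "200": the query always wins
}
```
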
Query param overrides command-line query := r.URL.Query() @@ -35,174 +31,308 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * if maxMB <= 0 && fs.option.MaxMB > 0 { maxMB = int32(fs.option.MaxMB) } - if maxMB <= 0 { - glog.V(4).Infoln("AutoChunking not enabled") - return false - } - glog.V(4).Infoln("AutoChunking level set to", maxMB, "(MB)") chunkSize := 1024 * 1024 * maxMB - contentLength := int64(0) - if contentLengthHeader := r.Header["Content-Length"]; len(contentLengthHeader) == 1 { - contentLength, _ = strconv.ParseInt(contentLengthHeader[0], 10, 64) - if contentLength <= int64(chunkSize) { - glog.V(4).Infoln("Content-Length of", contentLength, "is less than the chunk size of", chunkSize, "so autoChunking will be skipped.") - return false - } - } + stats.FilerRequestCounter.WithLabelValues("chunk").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("chunk").Observe(time.Since(start).Seconds()) + }() - if contentLength <= 0 { - glog.V(4).Infoln("Content-Length value is missing or unexpected so autoChunking will be skipped.") - return false + var reply *FilerPostResult + var err error + var md5bytes []byte + if r.Method == "POST" { + if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") { + reply, err = fs.mkdir(ctx, w, r) + } else { + reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, contentLength, so) + } + } else { + reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, contentLength, so) } - - reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter) if err != nil { - writeJsonError(w, r, http.StatusInternalServerError, err) + if strings.HasPrefix(err.Error(), "read input:") { + writeJsonError(w, r, 499, err) + } else if strings.HasSuffix(err.Error(), "is a file") { + writeJsonError(w, r, http.StatusConflict, err) + } else { + writeJsonError(w, r, http.StatusInternalServerError, err) + } } else if reply != nil { + if len(md5bytes) > 0 { + w.Header().Set("Content-MD5", util.Base64Encode(md5bytes)) + } writeJsonQuiet(w, r, http.StatusCreated, reply) } - return true } -func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { - - stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds()) - }() +func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { - return nil, multipartReaderErr + return nil, nil, multipartReaderErr } part1, part1Err := multipartReader.NextPart() if part1Err != nil { - return nil, part1Err + return nil, nil, part1Err } fileName := part1.FileName() if fileName != "" { fileName = path.Base(fileName) } + contentType := part1.Header.Get("Content-Type") + if contentType == "application/octet-stream" { + contentType = "" + } - var fileChunks []*filer_pb.FileChunk + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, contentLength, so) + if err != nil 
{ + return nil, nil, err + } - totalBytesRead := int64(0) - tmpBufferSize := int32(1024 * 1024) - tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) - chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow - chunkBufOffset := int32(0) - chunkOffset := int64(0) - writtenChunks := 0 + md5bytes = md5Hash.Sum(nil) + filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent) - filerResult = &FilerPostResult{ - Name: fileName, - } + return +} - for totalBytesRead < contentLength { - tmpBuffer.Reset() - bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) - readFully := readErr != nil && readErr == io.EOF - tmpBuf := tmpBuffer.Bytes() - bytesToCopy := tmpBuf[0:int(bytesRead)] +func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { - copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) - chunkBufOffset = chunkBufOffset + int32(bytesRead) + fileName := path.Base(r.URL.Path) + contentType := r.Header.Get("Content-Type") + if contentType == "application/octet-stream" { + contentType = "" + } - if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { - writtenChunks = writtenChunks + 1 - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) - if assignErr != nil { - return nil, assignErr - } + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, contentLength, so) + if err != nil { + return nil, nil, err + } - // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "", fileId, auth) - if uploadErr != nil { - return nil, uploadErr - } + md5bytes = md5Hash.Sum(nil) + filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent) - // Save to chunk manifest structure - fileChunks = append(fileChunks, - &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: uint64(chunkBufOffset), - Mtime: time.Now().UnixNano(), - }, - ) - - // reset variables for the next chunk - chunkBufOffset = 0 - chunkOffset = totalBytesRead + int64(bytesRead) - } + return +} - totalBytesRead = totalBytesRead + int64(bytesRead) +func isAppend(r *http.Request) bool { + return r.URL.Query().Get("op") == "append" +} - if bytesRead == 0 || readFully { - break - } +func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) { - if readErr != nil { - return nil, readErr - } + // detect file mode + modeStr := r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" + } + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 } + // fix the path path := r.URL.Path if strings.HasSuffix(path, "/") { if fileName != "" { path += fileName } + } else { + if fileName != "" { + if possibleDirEntry, findDirErr := 
fs.filer.FindEntry(ctx, util.FullPath(path)); findDirErr == nil { + if possibleDirEntry.IsDirectory() { + path += "/" + fileName + } + } + } } - glog.V(4).Infoln("saving", path) - entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), - Attr: filer2.Attr{ - Mtime: time.Now(), - Crtime: time.Now(), - Mode: 0660, - Uid: OS_UID, - Gid: OS_GID, - Replication: replication, - Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), - }, - Chunks: fileChunks, + var entry *filer.Entry + var mergedChunks []*filer_pb.FileChunk + // when it is an append + if isAppend(r) { + existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path)) + if findErr != nil && findErr != filer_pb.ErrNotFound { + glog.V(0).Infof("failing to find %s: %v", path, findErr) + } + entry = existingEntry + } + if entry != nil { + entry.Mtime = time.Now() + entry.Md5 = nil + // adjust chunk offsets + for _, chunk := range fileChunks { + chunk.Offset += int64(entry.FileSize) + } + mergedChunks = append(entry.Chunks, fileChunks...) + entry.FileSize += uint64(chunkOffset) + + // TODO + if len(entry.Content) > 0 { + replyerr = fmt.Errorf("append to small file is not supported yet") + return + } + + } else { + glog.V(4).Infoln("saving", path) + mergedChunks = fileChunks + entry = &filer.Entry{ + FullPath: util.FullPath(path), + Attr: filer.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: os.FileMode(mode), + Uid: OS_UID, + Gid: OS_GID, + Replication: so.Replication, + Collection: so.Collection, + TtlSec: so.TtlSeconds, + DiskType: so.DiskType, + Mime: contentType, + Md5: md5bytes, + FileSize: uint64(chunkOffset), + }, + Content: content, + } } - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.Chunks) + + // maybe compact entry chunks + mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks) + if replyerr != nil { + glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr) + return + } + entry.Chunks = mergedChunks + + filerResult = &FilerPostResult{ + Name: fileName, + Size: int64(entry.FileSize), + } + + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + + SaveAmzMetaData(r, entry.Extended, false) + + for k, v := range r.Header { + if len(v) > 0 && strings.HasPrefix(k, needle.PairNamePrefix) { + entry.Extended[k] = []byte(v[0]) + } + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil { + fs.filer.DeleteChunks(fileChunks) replyerr = dbErr filerResult.Error = dbErr.Error() glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) + } + return filerResult, replyerr +} + +func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType { + + return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) { + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so) + if assignErr != nil { + return nil, "", "", assignErr + } + + // upload the chunk to the volume server + uploadResult, uploadErr, _ := operation.Upload(urlLocation, name, fs.option.Cipher, reader, false, "", nil, auth) + if uploadErr != nil { + return nil, "", "", uploadErr + } + + return uploadResult.ToPbFileChunk(fileId, offset), so.Collection, so.Replication, nil + } +} + +func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http.Request) (filerResult *FilerPostResult, replyerr error) { + + // detect file mode + modeStr := 
r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" + } + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 + } + + // fix the path + path := r.URL.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + if err == nil && existingEntry != nil { + replyerr = fmt.Errorf("dir %s already exists", path) return } - return + glog.V(4).Infoln("mkdir", path) + entry := &filer.Entry{ + FullPath: util.FullPath(path), + Attr: filer.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: os.FileMode(mode) | os.ModeDir, + Uid: OS_UID, + Gid: OS_GID, + }, + } + + filerResult = &FilerPostResult{ + Name: util.FullPath(path).Name(), + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil { + replyerr = dbErr + filerResult.Error = dbErr.Error() + glog.V(0).Infof("failing to create dir %s on filer server : %v", path, dbErr) + } + return filerResult, replyerr } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { +func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool) (metadata map[string][]byte) { - stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) - }() + metadata = make(map[string][]byte) + if !isReplace { + for k, v := range existing { + metadata[k] = v + } + } - ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) - if uploadResult != nil { - glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) + if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" { + metadata[xhttp.AmzStorageClass] = []byte(sc) } - if uploadError != nil { - err = uploadError + + if tags := r.Header.Get(xhttp.AmzObjectTagging); tags != "" { + for _, v := range strings.Split(tags, "&") { + tag := strings.Split(v, "=") + if len(tag) == 2 { + metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1]) + } + } } + + for header, values := range r.Header { + if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) { + for _, value := range values { + metadata[header] = []byte(value) + } + } + } + return + } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go new file mode 100644 index 000000000..8334d1618 --- /dev/null +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -0,0 +1,91 @@ +package weed_server + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// handling single chunk POST or PUT upload +func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) { + + fileId, urlLocation, auth, err := fs.assignNewFileInfo(so) + + if err != nil || fileId == "" || urlLocation == "" { + return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter) + } + + glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) + + // Note: encrypt(gzip(data)), encrypt data first, then gzip + + sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024 + + pu, err := needle.ParseUpload(r, sizeLimit) + uncompressedData := pu.Data + if pu.IsGzipped { + uncompressedData = pu.UncompressedData + } + if pu.MimeType == "" { + pu.MimeType = http.DetectContentType(uncompressedData) + // println("detect2 mimetype to", pu.MimeType) + } + + uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth) + if uploadError != nil { + return nil, fmt.Errorf("upload to volume server: %v", uploadError) + } + + // Save to chunk manifest structure + fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)} + + // fmt.Printf("uploaded: %+v\n", uploadResult) + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if pu.FileName != "" { + path += pu.FileName + } + } + + entry := &filer.Entry{ + FullPath: util.FullPath(path), + Attr: filer.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: 0660, + Uid: OS_UID, + Gid: OS_GID, + Replication: so.Replication, + Collection: so.Collection, + TtlSec: so.TtlSeconds, + DiskType: so.DiskType, + Mime: pu.MimeType, + Md5: util.Base64Md5ToBytes(pu.ContentMd5), + }, + Chunks: fileChunks, + } + + filerResult = &FilerPostResult{ + Name: pu.FileName, + Size: int64(pu.OriginalDataSize), + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) + err = dbErr + filerResult.Error = dbErr.Error() + return + } + + return +} diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go new file mode 100644 index 
000000000..3ab45453e --- /dev/null +++ b/weed/server/filer_server_handlers_write_upload.go @@ -0,0 +1,105 @@ +package weed_server + +import ( + "crypto/md5" + "hash" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) { + var fileChunks []*filer_pb.FileChunk + + md5Hash := md5.New() + var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) + + chunkOffset := int64(0) + var smallContent []byte + + for { + limitedReader := io.LimitReader(partReader, int64(chunkSize)) + + data, err := ioutil.ReadAll(limitedReader) + if err != nil { + return nil, nil, 0, err, nil + } + if chunkOffset == 0 && !isAppend(r) { + if len(data) < int(fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 { + smallContent = data + chunkOffset += int64(len(data)) + break + } + } + dataReader := util.NewBytesReader(data) + + // retry to assign a different file id + var fileId, urlLocation string + var auth security.EncodedJwt + var assignErr, uploadErr error + var uploadResult *operation.UploadResult + for i := 0; i < 3; i++ { + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so) + if assignErr != nil { + return nil, nil, 0, assignErr, nil + } + + // upload the chunk to the volume server + uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth) + if uploadErr != nil { + time.Sleep(251 * time.Millisecond) + continue + } + break + } + if uploadErr != nil { + return nil, nil, 0, uploadErr, nil + } + + // if last chunk exhausted the reader exactly at the border + if uploadResult.Size == 0 { + break + } + + // Save to chunk manifest structure + fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset)) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size)) + + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadResult.Size) + + // if last chunk was not at full chunk size, but already exhausted the reader + if int64(uploadResult.Size) < int64(chunkSize) { + break + } + } + + return fileChunks, md5Hash, chunkOffset, nil, smallContent +} + +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) { + + stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds()) + }() + + uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) + if uploadResult != nil && uploadResult.RetryCount > 0 { + 
stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount)) + } + return uploadResult, err, data +} diff --git a/weed/server/filer_server_rocksdb.go b/weed/server/filer_server_rocksdb.go new file mode 100644 index 000000000..5fcc7e88f --- /dev/null +++ b/weed/server/filer_server_rocksdb.go @@ -0,0 +1,7 @@ +// +build rocksdb + +package weed_server + +import ( + _ "github.com/chrislusf/seaweedfs/weed/filer/rocksdb" +) diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index 2f0df7f91..5016117a8 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -1,8 +1,9 @@ -package master_ui +package filer_ui import ( - "path/filepath" "strings" + + "github.com/chrislusf/seaweedfs/weed/util" ) type Breadcrumb struct { @@ -16,7 +17,7 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { for i := 0; i < len(parts); i++ { crumb := Breadcrumb{ Name: parts[i] + " /", - Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)), + Link: "/" + util.Join(parts[0:i+1]...), } if !strings.HasSuffix(crumb.Link, "/") { crumb.Link += "/" diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index e532b27e2..648b97f22 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -1,20 +1,31 @@ -package master_ui +package filer_ui import ( "github.com/dustin/go-humanize" "html/template" + "net/url" + "strings" ) +func printpath(parts ...string) string { + concat := strings.Join(parts, "") + escaped := url.PathEscape(concat) + return strings.ReplaceAll(escaped, "%2F", "/") +} + var funcMap = template.FuncMap{ "humanizeBytes": humanize.Bytes, + "printpath": printpath, } var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOCTYPE html> <html> <head> - <title>SeaweedFS Filer</title> - <link rel="stylesheet" href="/seaweedfsstatic/bootstrap/3.3.1/css/bootstrap.min.css"> + <title>SeaweedFS Filer</title> + <meta name="viewport" content="width=device-width, initial-scale=1"> + <link rel="stylesheet" href="/seaweedfsstatic/bootstrap/3.3.1/css/bootstrap.min.css"> <style> +body { padding-bottom: 128px; } #drop-area { border: 1px transparent; } @@ -37,6 +48,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC #fileElem { display: none; } +.qrImage { + display: block; + margin-left: auto; + margin-right: auto; +} </style> </head> <body> @@ -50,7 +66,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <div class="row"> <div> {{ range $entry := .Breadcrumbs }} - <a href="{{ $entry.Link }}" > + <a href="{{ printpath $entry.Link }}" > {{ $entry.Name }} </a> {{ end }} @@ -69,11 +85,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <td> {{if $entry.IsDirectory}} <img src="/seaweedfsstatic/images/folder.gif" width="20" height="23"> - <a href={{ print $path "/" $entry.Name "/"}} > + <a href="{{ printpath $path "/" $entry.Name "/"}}" > {{ $entry.Name }} </a> {{else}} - <a href={{ print $path "/" $entry.Name }} > + <a href="{{ printpath $path "/" $entry.Name }}" > {{ $entry.Name }} </a> {{end}} @@ -107,6 +123,14 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC </a> </div> {{end}} + + <br/> + <br/> + + <div class="navbar navbar-fixed-bottom"> + <img src="data:image/png;base64,{{.QrImage}}" class="qrImage" /> + </div> + </div> </body> <script type="text/javascript"> diff --git 
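
Two small UI touches land with the rename to package `filer_ui`: directory listings now append a QR code for the current URL (handy for pulling the same listing up on a phone), and hrefs are built through the new `printpath` helper so names containing spaces or `#` survive as valid links. The helper is short enough to test standalone:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// printpath, as added to templates.go above: join the parts, percent-escape
// the whole thing, then restore the slashes so the result is still a path.
func printpath(parts ...string) string {
	concat := strings.Join(parts, "")
	escaped := url.PathEscape(concat)
	return strings.ReplaceAll(escaped, "%2F", "/")
}

func main() {
	fmt.Println(printpath("/buckets", "/", "my dir", "/", "a#b.txt"))
	// Output: /buckets/my%20dir/a%23b.txt
}
```
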
a/weed/server/gateway_server.go b/weed/server/gateway_server.go new file mode 100644 index 000000000..608217ed7 --- /dev/null +++ b/weed/server/gateway_server.go @@ -0,0 +1,106 @@ +package weed_server + +import ( + "github.com/chrislusf/seaweedfs/weed/operation" + "google.golang.org/grpc" + "math/rand" + "net/http" + + "github.com/chrislusf/seaweedfs/weed/util" + + _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7" + _ "github.com/chrislusf/seaweedfs/weed/filer/etcd" + _ "github.com/chrislusf/seaweedfs/weed/filer/hbase" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3" + _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis2" + "github.com/chrislusf/seaweedfs/weed/glog" + _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" + _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub" + _ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub" + _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" + _ "github.com/chrislusf/seaweedfs/weed/notification/log" + "github.com/chrislusf/seaweedfs/weed/security" +) + +type GatewayOption struct { + Masters []string + Filers []string + MaxMB int + IsSecure bool +} + +type GatewayServer struct { + option *GatewayOption + secret security.SigningKey + grpcDialOption grpc.DialOption +} + +func NewGatewayServer(defaultMux *http.ServeMux, option *GatewayOption) (fs *GatewayServer, err error) { + + fs = &GatewayServer{ + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), + } + + if len(option.Masters) == 0 { + glog.Fatal("master list is required!") + } + + defaultMux.HandleFunc("/blobs/", fs.blobsHandler) + defaultMux.HandleFunc("/files/", fs.filesHandler) + defaultMux.HandleFunc("/topics/", fs.topicsHandler) + + return fs, nil +} + +func (fs *GatewayServer) getMaster() string { + randMaster := rand.Intn(len(fs.option.Masters)) + return fs.option.Masters[randMaster] +} + +func (fs *GatewayServer) blobsHandler(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "DELETE": + chunkId := r.URL.Path[len("/blobs/"):] + fullUrl, err := operation.LookupFileId(fs.getMaster, chunkId) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + var jwtAuthorization security.EncodedJwt + if fs.option.IsSecure { + jwtAuthorization = operation.LookupJwt(fs.getMaster(), chunkId) + } + body, statusCode, err := util.DeleteProxied(fullUrl, string(jwtAuthorization)) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + w.WriteHeader(statusCode) + w.Write(body) + case "POST": + submitForClientHandler(w, r, fs.getMaster, fs.grpcDialOption) + } +} + +func (fs *GatewayServer) filesHandler(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "DELETE": + case "POST": + } +} + +func (fs *GatewayServer) topicsHandler(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "POST": + } +} diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index fcfd98f7b..3e6d9bb9e 100644 --- 
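
`gateway_server.go` introduces a thin HTTP front that fans each request out to a randomly chosen master; only `/blobs/` is wired up so far (DELETE by chunk id, POST to upload), with `/files/` and `/topics/` registered as empty stubs. A minimal sketch of that shape, where the `:8088` port and the handler bodies are placeholders rather than the real proxy logic:

```go
package main

import (
	"fmt"
	"log"
	"math/rand"
	"net/http"
)

type gateway struct{ masters []string }

// getMaster mirrors the uniform random pick in the diff: no leader
// tracking, just spread lookups across the configured masters.
func (g *gateway) getMaster() string {
	return g.masters[rand.Intn(len(g.masters))]
}

func (g *gateway) blobsHandler(w http.ResponseWriter, r *http.Request) {
	chunkId := r.URL.Path[len("/blobs/"):]
	switch r.Method {
	case http.MethodDelete:
		// real code looks the chunk up on a master and proxies the delete
		fmt.Fprintf(w, "would delete %s via %s\n", chunkId, g.getMaster())
	case http.MethodPost:
		// real code goes through submitForClientHandler
		fmt.Fprintf(w, "would upload via %s\n", g.getMaster())
	}
}

func main() {
	g := &gateway{masters: []string{"localhost:9333", "localhost:9334"}}
	mux := http.NewServeMux()
	mux.HandleFunc("/blobs/", g.blobsHandler)
	// /files/ and /topics/ exist in the diff but are still empty switches
	log.Fatal(http.ListenAndServe(":8088", mux))
}
```
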
a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -1,29 +1,32 @@ package weed_server import ( + "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "net" "strings" "time" "github.com/chrislusf/raft" + "google.golang.org/grpc/peer" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" - "google.golang.org/grpc/peer" ) func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error { var dn *topology.DataNode - t := ms.Topo defer func() { if dn != nil { + // if the volume server disconnects and reconnects quickly + // the unregister and register can race with each other + ms.Topo.UnRegisterDataNode(dn) glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port) - t.UnRegisterDataNode(dn) message := &master_pb.VolumeLocation{ Url: dn.Url(), @@ -58,39 +61,33 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ return err } - t.Sequence.SetMax(heartbeat.MaxFileKey) + ms.Topo.Sequence.SetMax(heartbeat.MaxFileKey) if dn == nil { - if heartbeat.Ip == "" { - if pr, ok := peer.FromContext(stream.Context()); ok { - if pr.Addr != net.Addr(nil) { - heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")] - glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip) - } - } - } - dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) - dc := t.GetOrCreateDataCenter(dcName) + dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) + dc := ms.Topo.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName) - dn = rack.GetOrCreateDataNode(heartbeat.Ip, - int(heartbeat.Port), heartbeat.PublicUrl, - int64(heartbeat.MaxVolumeCount)) + dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts) glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ - VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, - MetricsAddress: ms.option.MetricsAddress, - MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), - StorageBackends: backend.ToPbStorageBackends(), + VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, }); err != nil { glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err) return err } } + dn.AdjustMaxVolumeCounts(heartbeat.MaxVolumeCounts) + glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) + var dataCenter string + if dc := dn.GetDataCenter(); dc != nil { + dataCenter = string(dc.Id()) + } message := &master_pb.VolumeLocation{ - Url: dn.Url(), - PublicUrl: dn.PublicUrl, + Url: dn.Url(), + PublicUrl: dn.PublicUrl, + DataCenter: dataCenter, } if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 { // process delta volume ids if exists for fast volume id updates @@ -101,12 +98,12 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ message.DeletedVids = append(message.DeletedVids, volInfo.Id) } // update master internal volume layouts - t.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn) + ms.Topo.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, 
dn) } if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes { // process heartbeat.Volumes - newVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn) + newVolumes, deletedVolumes := ms.Topo.SyncDataNodeRegistration(heartbeat.Volumes, dn) for _, v := range newVolumes { glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url()) @@ -121,7 +118,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ if len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 { // update master internal volume layouts - t.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn) + ms.Topo.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn) for _, s := range heartbeat.NewEcShards { message.NewVids = append(message.NewVids, s.Id) @@ -136,8 +133,8 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ } if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards { - glog.V(1).Infof("master recieved ec shards from %s: %+v", dn.Url(), heartbeat.EcShards) - newShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn) + glog.V(1).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards) + newShards, deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn) // broadcast the ec vid changes to master clients for _, s := range newShards { @@ -151,7 +148,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ } } - if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 { ms.clientChansLock.RLock() for host, ch := range ms.clientChans { @@ -162,7 +158,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ } // tell the volume servers about the leader - newLeader, err := t.Leader() + newLeader, err := ms.Topo.Leader() if err != nil { glog.Warningf("SendHeartbeat find leader: %v", err) return err @@ -189,35 +185,14 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ return ms.informNewLeader(stream) } - // remember client address - ctx := stream.Context() - // fmt.Printf("FromContext %+v\n", ctx) - pr, ok := peer.FromContext(ctx) - if !ok { - glog.Error("failed to get peer from ctx") - return fmt.Errorf("failed to get peer from ctx") - } - if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") - return fmt.Errorf("failed to get peer address") - } + peerAddress := findClientAddress(stream.Context(), req.GrpcPort) - clientName := req.Name + pr.Addr.String() - glog.V(0).Infof("+ client %v", clientName) + // buffer by 1 so we don't end up getting stuck writing to stopChan forever + stopChan := make(chan bool, 1) - messageChan := make(chan *master_pb.VolumeLocation) - stopChan := make(chan bool) + clientName, messageChan := ms.addClient(req.Name, peerAddress) - ms.clientChansLock.Lock() - ms.clientChans[clientName] = messageChan - ms.clientChansLock.Unlock() - - defer func() { - glog.V(0).Infof("- client %v", clientName) - ms.clientChansLock.Lock() - delete(ms.clientChans, clientName) - ms.clientChansLock.Unlock() - }() + defer ms.deleteClient(clientName) for _, message := range ms.Topo.ToVolumeLocations() { if err := stream.Send(message); err != nil { @@ -253,7 +228,6 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ } } - return nil } func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error { @@ -269,3 +243,78 @@ func (ms 
*MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedSe } return nil } + +func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ client %v", clientName) + + // we buffer this because otherwise we end up in a potential deadlock where + // the KeepConnected loop is no longer listening on this channel but we're + // trying to send to it in SendHeartbeat and so we can't lock the + // clientChansLock to remove the channel and we're stuck writing to it + // 100 is probably overkill + messageChan = make(chan *master_pb.VolumeLocation, 100) + + ms.clientChansLock.Lock() + ms.clientChans[clientName] = messageChan + ms.clientChansLock.Unlock() + return +} + +func (ms *MasterServer) deleteClient(clientName string) { + glog.V(0).Infof("- client %v", clientName) + ms.clientChansLock.Lock() + delete(ms.clientChans, clientName) + ms.clientChansLock.Unlock() +} + +func findClientAddress(ctx context.Context, grpcPort uint32) string { + // fmt.Printf("FromContext %+v\n", ctx) + pr, ok := peer.FromContext(ctx) + if !ok { + glog.Error("failed to get peer from ctx") + return "" + } + if pr.Addr == net.Addr(nil) { + glog.Error("failed to get peer address") + return "" + } + if grpcPort == 0 { + return pr.Addr.String() + } + if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok { + externalIP := tcpAddr.IP + return fmt.Sprintf("%s:%d", externalIP, grpcPort) + } + return pr.Addr.String() + +} + +func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) { + resp := &master_pb.ListMasterClientsResponse{} + ms.clientChansLock.RLock() + defer ms.clientChansLock.RUnlock() + + for k := range ms.clientChans { + if strings.HasPrefix(k, req.ClientType+"@") { + resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:]) + } + } + return resp, nil +} + +func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) { + + // tell the volume servers about the leader + leader, _ := ms.Topo.Leader() + + resp := &master_pb.GetMasterConfigurationResponse{ + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + StorageBackends: backend.ToPbStorageBackends(), + DefaultReplication: ms.option.DefaultReplicaPlacement, + Leader: leader, + } + + return resp, nil +} diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go new file mode 100644 index 000000000..93c9e4e4e --- /dev/null +++ b/weed/server/master_grpc_server_admin.go @@ -0,0 +1,143 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "math/rand" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +/* +How exclusive lock works? 
+----------- + +Shell +------ +When shell lock, + * lease an admin token (lockTime, token) + * start a goroutine to renew the admin token periodically + +When shell unlock + * stop the renewal goroutine + * sends a release lock request + +Master +------ +Master maintains: + * randomNumber + * lastLockTime +When master receives the lease/renew request from shell + If lastLockTime still fresh { + if is a renew and token is valid { + // for renew + generate the randomNumber => token + return + } + refuse + return + } else { + // for fresh lease request + generate the randomNumber => token + return + } + +When master receives the release lock request from shell + set the lastLockTime to zero + + +The volume server does not need to verify. +This makes the lock/unlock optional, similar to what golang code usually does. + +*/ + +const ( + LockDuration = 10 * time.Second +) + +type AdminLock struct { + accessSecret int64 + accessLockTime time.Time + lastClient string +} + +type AdminLocks struct { + locks map[string]*AdminLock + sync.RWMutex +} + +func NewAdminLocks() *AdminLocks { + return &AdminLocks{ + locks: make(map[string]*AdminLock), + } +} + +func (locks *AdminLocks) isLocked(lockName string) (clientName string, isLocked bool) { + locks.RLock() + defer locks.RUnlock() + adminLock, found := locks.locks[lockName] + if !found { + return "", false + } + glog.V(4).Infof("isLocked %v", adminLock.lastClient) + return adminLock.lastClient, adminLock.accessLockTime.Add(LockDuration).After(time.Now()) +} + +func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool { + locks.RLock() + defer locks.RUnlock() + adminLock, found := locks.locks[lockName] + if !found { + return false + } + return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token +} + +func (locks *AdminLocks) generateToken(lockName string, clientName string) (ts time.Time, token int64) { + locks.Lock() + defer locks.Unlock() + lock := &AdminLock{ + accessSecret: rand.Int63(), + accessLockTime: time.Now(), + lastClient: clientName, + } + locks.locks[lockName] = lock + return lock.accessLockTime, lock.accessSecret +} + +func (locks *AdminLocks) deleteLock(lockName string) { + locks.Lock() + defer locks.Unlock() + delete(locks.locks, lockName) +} + +func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) { + resp := &master_pb.LeaseAdminTokenResponse{} + + if lastClient, isLocked := ms.adminLocks.isLocked(req.LockName); isLocked { + glog.V(4).Infof("LeaseAdminToken %v", lastClient) + if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) { + // for renew + ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName) + resp.Token, resp.LockTsNs = token, ts.UnixNano() + return resp, nil + } + // refuse since still locked + return resp, fmt.Errorf("already locked by " + lastClient) + } + // for fresh lease request + ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName) + resp.Token, resp.LockTsNs = token, ts.UnixNano() + return resp, nil +} + +func (ms *MasterServer) ReleaseAdminToken(ctx context.Context, req *master_pb.ReleaseAdminTokenRequest) (*master_pb.ReleaseAdminTokenResponse, error) { + resp := &master_pb.ReleaseAdminTokenResponse{} + if ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) { + ms.adminLocks.deleteLock(req.LockName) + } + return resp, nil +} diff 
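
The admin lock gives `weed shell` an exclusive, leased token: a fresh lease is granted only once the previous holder's 10-second window has lapsed, and a renew must echo back the exact (lockTime, token) pair the master issued last. Since volume servers never verify the token, the lock stays advisory, as the comment above notes. A condensed single-process model of the handshake:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

const lockDuration = 10 * time.Second

// adminLock collapses AdminLocks/AdminLock above into one in-memory lock
// so the lease/renew/refuse decisions can be exercised directly.
type adminLock struct {
	mu       sync.Mutex
	secret   int64
	lockTime time.Time
	client   string
}

func (l *adminLock) lease(client string, prevTime time.Time, prevToken int64) (time.Time, int64, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	stillLocked := l.lockTime.Add(lockDuration).After(time.Now())
	validRenew := prevToken != 0 && l.lockTime.Equal(prevTime) && l.secret == prevToken
	if stillLocked && !validRenew {
		return time.Time{}, 0, fmt.Errorf("already locked by %s", l.client)
	}
	// fresh lease or valid renew: rotate the token and restart the window
	l.secret, l.lockTime, l.client = rand.Int63(), time.Now(), client
	return l.lockTime, l.secret, nil
}

func main() {
	var lock adminLock
	ts, token, err := lock.lease("shell-a", time.Time{}, 0)
	fmt.Println("lease granted:", token != 0, err)          // true <nil>
	_, _, err = lock.lease("shell-b", time.Time{}, 0)
	fmt.Println("competing lease:", err)                    // refused while fresh
	_, _, err = lock.lease("shell-a", ts, token)
	fmt.Println("renew with old pair:", err)                // succeeds, token rotates
}
```
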
--git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go index f8e0785f6..b92d6bcbe 100644 --- a/weed/server/master_grpc_server_collection.go +++ b/weed/server/master_grpc_server_collection.go @@ -4,6 +4,7 @@ import ( "context" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index 856c07890..156afd4a1 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -3,8 +3,8 @@ package weed_server import ( "context" "fmt" - "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" @@ -61,11 +61,13 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest if err != nil { return nil, err } + diskType := types.ToDiskType(req.DiskType) option := &topology.VolumeGrowOption{ Collection: req.Collection, ReplicaPlacement: replicaPlacement, Ttl: ttl, + DiskType: diskType, Prealloacte: ms.preallocateSize, DataCenter: req.DataCenter, Rack: req.Rack, @@ -74,8 +76,8 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } if !ms.Topo.HasWritableVolume(option) { - if ms.Topo.FreeSpace() <= 0 { - return nil, fmt.Errorf("No free volumes left!") + if ms.Topo.AvailableSpaceFor(option) <= 0 { + return nil, fmt.Errorf("no free volumes left for " + option.String()) } ms.vgLock.Lock() if !ms.Topo.HasWritableVolume(option) { @@ -118,9 +120,8 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic return nil, err } - volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl) + volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, types.ToDiskType(req.DiskType)) stats := volumeLayout.Stats() - resp := &master_pb.StatisticsResponse{ TotalSize: stats.TotalSize, UsedSize: stats.UsedSize, @@ -177,12 +178,15 @@ func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.Looku return resp, nil } -func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) { +func (ms *MasterServer) VacuumVolume(ctx context.Context, req *master_pb.VacuumVolumeRequest) (*master_pb.VacuumVolumeResponse, error) { - resp := &master_pb.GetMasterConfigurationResponse{ - MetricsAddress: ms.option.MetricsAddress, - MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError } + resp := &master_pb.VacuumVolumeResponse{} + + ms.Topo.Vacuum(ms.grpcDialOption, float64(req.GarbageThreshold), ms.preallocateSize) + return resp, nil } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 33a5129da..e2b2df18d 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -1,19 +1,20 @@ package weed_server import ( - "context" "fmt" "net/http" "net/http/httputil" "net/url" "os" "regexp" - "strconv" "strings" "sync" "time" "github.com/chrislusf/raft" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" @@ -22,9 +23,6 @@ import ( 
"github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc" ) const ( @@ -33,11 +31,12 @@ const ( ) type MasterOption struct { - Port int - MetaFolder string - VolumeSizeLimitMB uint - VolumePreallocate bool - PulseSeconds int + Host string + Port int + MetaFolder string + VolumeSizeLimitMB uint + VolumePreallocate bool + // PulseSeconds int DefaultReplicaPlacement string GarbageThreshold float64 WhiteList []string @@ -56,7 +55,7 @@ type MasterServer struct { vg *topology.VolumeGrowth vgLock sync.Mutex - bounedLeaderChan chan int + boundedLeaderChan chan int // notifying clients clientChansLock sync.RWMutex @@ -65,11 +64,13 @@ type MasterServer struct { grpcDialOption grpc.DialOption MasterClient *wdclient.MasterClient + + adminLocks *AdminLocks } func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -78,35 +79,39 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste v.SetDefault("jwt.signing.read.expires_after_seconds", 60) readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds") + v.SetDefault("master.replication.treat_replication_as_minimums", false) + replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums") + var preallocateSize int64 if option.VolumePreallocate { preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20) } - grpcDialOption := security.LoadClientTLS(v.Sub("grpc"), "master") + grpcDialOption := security.LoadClientTLS(v, "grpc.master") ms := &MasterServer{ option: option, preallocateSize: preallocateSize, clientChans: make(map[string]chan *master_pb.VolumeLocation), grpcDialOption: grpcDialOption, - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers), + adminLocks: NewAdminLocks(), } - ms.bounedLeaderChan = make(chan int, 16) + ms.boundedLeaderChan = make(chan int, 16) seq := ms.createSequencer(option) if nil == seq { glog.Fatalf("create sequencer failed.") } - ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, ms.option.PulseSeconds) + ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, 5, replicationAsMin) ms.vg = topology.NewDefaultVolumeGrowth() glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB") ms.guard = security.NewGuard(ms.option.WhiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) + handleStaticResources2(r) + r.HandleFunc("/", ms.proxyToLeader(ms.uiStatusHandler)) + r.HandleFunc("/ui/index.html", ms.uiStatusHandler) if !ms.option.DisableHttp { - handleStaticResources2(r) - r.HandleFunc("/", ms.proxyToLeader(ms.uiStatusHandler)) - r.HandleFunc("/ui/index.html", ms.uiStatusHandler) r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) r.HandleFunc("/dir/lookup", ms.guard.WhiteList(ms.dirLookupHandler)) r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) @@ -115,9 +120,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste 
r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) - r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) - r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) - r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + /* + r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) + r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) + r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + */ r.HandleFunc("/{fileId}", ms.redirectHandler) } @@ -131,14 +138,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { ms.Topo.RaftServer = raftServer.raftServer ms.Topo.RaftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) { - glog.V(0).Infof("event: %+v", e) + glog.V(0).Infof("leader change event: %+v => %+v", e.PrevValue(), e.Value()) if ms.Topo.RaftServer.Leader() != "" { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.") } }) - ms.Topo.RaftServer.AddEventListener(raft.StateChangeEventType, func(e raft.Event) { - glog.V(0).Infof("state change: %+v", e) - }) if ms.Topo.IsLeader() { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!") } else { @@ -148,13 +152,13 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { } } -func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { +func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if ms.Topo.IsLeader() { f(w, r) } else if ms.Topo.RaftServer != nil && ms.Topo.RaftServer.Leader() != "" { - ms.bounedLeaderChan <- 1 - defer func() { <-ms.bounedLeaderChan }() + ms.boundedLeaderChan <- 1 + defer func() { <-ms.boundedLeaderChan }() targetUrl, err := url.Parse("http://" + ms.Topo.RaftServer.Leader()) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, @@ -183,7 +187,7 @@ func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Requ func (ms *MasterServer) startAdminScripts() { var err error - v := viper.GetViper() + v := util.GetViper() adminScripts := v.GetString("master.maintenance.scripts") glog.V(0).Infof("adminScripts:\n%v", adminScripts) if adminScripts == "" { @@ -193,20 +197,25 @@ func (ms *MasterServer) startAdminScripts() { v.SetDefault("master.maintenance.sleep_minutes", 17) sleepMinutes := v.GetInt("master.maintenance.sleep_minutes") - v.SetDefault("master.filer.default_filer_url", "http://localhost:8888/") - filerURL := v.GetString("master.filer.default_filer_url") + v.SetDefault("master.filer.default", "localhost:8888") + filerHostPort := v.GetString("master.filer.default") scriptLines := strings.Split(adminScripts, "\n") + if !strings.Contains(adminScripts, "lock") { + scriptLines = append(append([]string{}, "lock"), scriptLines...) 
+ scriptLines = append(scriptLines, "unlock") + } - masterAddress := "localhost:" + strconv.Itoa(ms.option.Port) + masterAddress := fmt.Sprintf("%s:%d", ms.option.Host, ms.option.Port) var shellOptions shell.ShellOptions - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "master") + shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master") shellOptions.Masters = &masterAddress - shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL) + shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort) + shellOptions.Directory = "/" if err != nil { - glog.V(0).Infof("failed to parse master.filer.default_filer_urll=%s : %v\n", filerURL, err) + glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err) return } @@ -220,27 +229,11 @@ func (ms *MasterServer) startAdminScripts() { commandEnv.MasterClient.WaitUntilConnected() c := time.Tick(time.Duration(sleepMinutes) * time.Minute) - for _ = range c { + for range c { if ms.Topo.IsLeader() { for _, line := range scriptLines { - - cmds := reg.FindAllString(line, -1) - if len(cmds) == 0 { - continue - } - args := make([]string, len(cmds[1:])) - for i := range args { - args[i] = strings.Trim(string(cmds[1+i]), "\"'") - } - cmd := strings.ToLower(cmds[0]) - - for _, c := range shell.Commands { - if c.Name() == cmd { - glog.V(0).Infof("executing: %s %v", cmd, args) - if err := c.Do(args, commandEnv, os.Stdout); err != nil { - glog.V(0).Infof("error: %v", err) - } - } + for _, c := range strings.Split(line, ";") { + processEachCmd(reg, c, commandEnv) } } } @@ -248,9 +241,30 @@ func (ms *MasterServer) startAdminScripts() { }() } +func processEachCmd(reg *regexp.Regexp, line string, commandEnv *shell.CommandEnv) { + cmds := reg.FindAllString(line, -1) + if len(cmds) == 0 { + return + } + args := make([]string, len(cmds[1:])) + for i := range args { + args[i] = strings.Trim(string(cmds[1+i]), "\"'") + } + cmd := strings.ToLower(cmds[0]) + + for _, c := range shell.Commands { + if c.Name() == cmd { + glog.V(0).Infof("executing: %s %v", cmd, args) + if err := c.Do(args, commandEnv, os.Stdout); err != nil { + glog.V(0).Infof("error: %v", err) + } + } + } +} + func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer { var seq sequence.Sequencer - v := viper.GetViper() + v := util.GetViper() seqType := strings.ToLower(v.GetString(SequencerType)) glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType) switch strings.ToLower(seqType) { @@ -263,6 +277,13 @@ func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer glog.Error(err) seq = nil } + case "snowflake": + var err error + seq, err = sequence.NewSnowflakeSequencer(fmt.Sprintf("%s:%d", option.Host, option.Port)) + if err != nil { + glog.Error(err) + seq = nil + } default: seq = sequence.NewMemorySequencer() } diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go index 514d86800..a9fecc5bd 100644 --- a/weed/server/master_server_handlers.go +++ b/weed/server/master_server_handlers.go @@ -45,7 +45,7 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) vid = fileId[0:commaSep] } } - collection := r.FormValue("collection") //optional, but can be faster if too many collections + collection := r.FormValue("collection") // optional, but can be faster if too many collections location := ms.findVolumeLocation(collection, vid) httpStatus := http.StatusOK if location.Error 
!= "" || location.Locations == nil { @@ -72,9 +72,6 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo for _, loc := range machines { locations = append(locations, operation.Location{Url: loc.Url(), PublicUrl: loc.PublicUrl}) } - if locations == nil { - err = fmt.Errorf("volume id %s not found", vid) - } } } else { machines, getVidLocationsErr := ms.MasterClient.GetVidLocations(vid) @@ -83,6 +80,9 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo } err = getVidLocationsErr } + if len(locations) == 0 && err == nil { + err = fmt.Errorf("volume id %s not found", vid) + } ret := operation.LookupResult{ VolumeId: vid, Locations: locations, @@ -112,8 +112,8 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) } if !ms.Topo.HasWritableVolume(option) { - if ms.Topo.FreeSpace() <= 0 { - writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left!"}) + if ms.Topo.AvailableSpaceFor(option) <= 0 { + writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left for " + option.String()}) return } ms.vgLock.Lock() @@ -136,6 +136,9 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) } func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId string, isWrite bool) { + if fileId == "" { + return + } var encodedJwt security.EncodedJwt if isWrite { encodedJwt = security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId) diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 2965a4863..f24d4e924 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -3,6 +3,7 @@ package weed_server import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "math/rand" "net/http" "strconv" @@ -44,7 +45,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Topology"] = ms.Topo.ToMap() writeJsonQuiet(w, r, http.StatusOK, m) } @@ -61,7 +62,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque return } } - glog.Infoln("garbageThreshold =", gcThreshold) + // glog.Infoln("garbageThreshold =", gcThreshold) ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize) ms.dirStatusHandler(w, r) } @@ -75,8 +76,8 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request } if count, err = strconv.Atoi(r.FormValue("count")); err == nil { - if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) { - err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount()) + if ms.Topo.AvailableSpaceFor(option) < int64(count*option.ReplicaPlacement.GetCopyCount()) { + err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.AvailableSpaceFor(option), count*option.ReplicaPlacement.GetCopyCount()) } else { count, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo) } @@ -93,7 +94,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request func (ms *MasterServer) volumeStatusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + 
m["Version"] = util.Version() m["Volumes"] = ms.Topo.ToVolumeMap() writeJsonQuiet(w, r, http.StatusOK, m) } @@ -110,7 +111,7 @@ func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request) } else { url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path } - http.Redirect(w, r, url, http.StatusMovedPermanently) + http.Redirect(w, r, url, http.StatusPermanentRedirect) } else { writeJsonError(w, r, http.StatusNotFound, fmt.Errorf("volume id %s not found: %s", vid, location.Error)) } @@ -124,19 +125,19 @@ func (ms *MasterServer) selfUrl(r *http.Request) string { } func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) { if ms.Topo.IsLeader() { - submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOption) + submitForClientHandler(w, r, func() string { return ms.selfUrl(r) }, ms.grpcDialOption) } else { masterUrl, err := ms.Topo.Leader() if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else { - submitForClientHandler(w, r, masterUrl, ms.grpcDialOption) + submitForClientHandler(w, r, func() string { return masterUrl }, ms.grpcDialOption) } } } func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) bool { - vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl) + vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType) return vl.GetActiveVolumeCount(option) > 0 } @@ -157,6 +158,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr if err != nil { return nil, err } + diskType := types.ToDiskType(r.FormValue("disk")) preallocate := ms.preallocateSize if r.FormValue("preallocate") != "" { @@ -169,6 +171,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr Collection: r.FormValue("collection"), ReplicaPlacement: replicaPlacement, Ttl: ttl, + DiskType: diskType, Prealloacte: preallocate, DataCenter: r.FormValue("dataCenter"), Rack: r.FormValue("rack"), diff --git a/weed/server/master_server_handlers_ui.go b/weed/server/master_server_handlers_ui.go index f241df87f..3822c6113 100644 --- a/weed/server/master_server_handlers_ui.go +++ b/weed/server/master_server_handlers_ui.go @@ -2,6 +2,7 @@ package weed_server import ( "net/http" + "time" "github.com/chrislusf/raft" ui "github.com/chrislusf/seaweedfs/weed/server/master_ui" @@ -11,19 +12,21 @@ import ( func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) { infos := make(map[string]interface{}) - infos["Version"] = util.VERSION + infos["Up Time"] = time.Now().Sub(startTime).String() args := struct { - Version string - Topology interface{} - RaftServer raft.Server - Stats map[string]interface{} - Counters *stats.ServerStats + Version string + Topology interface{} + RaftServer raft.Server + Stats map[string]interface{} + Counters *stats.ServerStats + VolumeSizeLimitMB uint }{ - util.VERSION, + util.Version(), ms.Topo.ToMap(), ms.Topo.RaftServer, infos, serverStats, + ms.option.VolumeSizeLimitMB, } ui.StatusTpl.Execute(w, args) } diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go index b674e3f82..31b6353e9 100644 --- a/weed/server/master_ui/templates.go +++ b/weed/server/master_ui/templates.go @@ -22,9 +22,13 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html> <div class="row"> <div class="col-sm-6"> <h2>Cluster status</h2> - <table class="table"> + <table class="table table-condensed table-striped"> <tbody> <tr> + <th>Volume 
Size Limit</th> + <td>{{ .VolumeSizeLimitMB }}MB</td> + </tr> + <tr> <th>Free</th> <td>{{ .Topology.Free }}</td> </tr> @@ -38,8 +42,8 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html> <td><a href="http://{{ .Leader }}">{{ .Leader }}</a></td> </tr> <tr> - <td class="col-sm-2 field-label"><label>Other Masters:</label></td> - <td class="col-sm-10"><ul class="list-unstyled"> + <th>Other Masters</th> + <td class="col-sm-5"><ul class="list-unstyled"> {{ range $k, $p := .Peers }} <li><a href="http://{{ $p.Name }}/ui/index.html">{{ $p.Name }}</a></li> {{ end }} @@ -76,6 +80,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html> <th>Rack</th> <th>RemoteAddr</th> <th>#Volumes</th> + <th>Volume Ids</th> <th>#ErasureCodingShards</th> <th>Max</th> </tr> @@ -87,8 +92,13 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html> <tr> <td><code>{{ $dc.Id }}</code></td> <td>{{ $rack.Id }}</td> - <td><a href="http://{{ $dn.Url }}/ui/index.html">{{ $dn.Url }}</a></td> + <td><a href="http://{{ $dn.Url }}/ui/index.html">{{ $dn.Url }}</a> + {{ if ne $dn.PublicUrl $dn.Url }} + / <a href="http://{{ $dn.PublicUrl }}/ui/index.html">{{ $dn.PublicUrl }}</a> + {{ end }} + </td> <td>{{ $dn.Volumes }}</td> + <td>{{ $dn.VolumeIds}}</td> <td>{{ $dn.EcShards }}</td> <td>{{ $dn.Max }}</td> </tr> diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 53289f1c1..85841e409 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -2,16 +2,18 @@ package weed_server import ( "encoding/json" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "io/ioutil" + "math/rand" "os" "path" - "reflect" "sort" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -25,7 +27,31 @@ type RaftServer struct { *raft.GrpcServer } -func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { +type StateMachine struct { + raft.StateMachine + topo *topology.Topology +} + +func (s StateMachine) Save() ([]byte, error) { + state := topology.MaxVolumeIdCommand{ + MaxVolumeId: s.topo.GetMaxVolumeId(), + } + glog.V(1).Infof("Save raft state %+v", state) + return json.Marshal(state) +} + +func (s StateMachine) Recovery(data []byte) error { + state := topology.MaxVolumeIdCommand{} + err := json.Unmarshal(data, &state) + if err != nil { + return err + } + glog.V(1).Infof("Recovery raft state %+v", state) + s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId) + return nil +} + +func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, raftResumeState bool) (*RaftServer, error) { s := &RaftServer{ peers: peers, serverAddr: serverAddr, @@ -43,47 +69,66 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d transporter := raft.NewGrpcTransporter(grpcDialOption) glog.V(0).Infof("Starting RaftServer with %v", serverAddr) - // Clear old cluster configurations if peers are changed - if oldPeers, changed := isPeersChanged(s.dataDir, serverAddr, s.peers); changed { - glog.V(0).Infof("Peers Change: %v => %v", oldPeers, s.peers) + if !raftResumeState { + // always clear previous metadata os.RemoveAll(path.Join(s.dataDir, "conf")) os.RemoveAll(path.Join(s.dataDir, "log")) os.RemoveAll(path.Join(s.dataDir, "snapshot")) } 
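The StateMachine above persists only the maximum volume id, so a raft snapshot is a one-field JSON document and recovery is an unmarshal plus an up-adjust. A self-contained sketch of that round trip; the uint32 id and the apply callback are stand-ins for needle.VolumeId and topo.UpAdjustMaxVolumeId:

import "encoding/json"

type maxVolumeIdCommand struct {
	MaxVolumeId uint32 `json:"MaxVolumeId"`
}

func saveSnapshot(current uint32) ([]byte, error) {
	return json.Marshal(maxVolumeIdCommand{MaxVolumeId: current})
}

func recoverSnapshot(data []byte, apply func(uint32)) error {
	var state maxVolumeIdCommand
	if err := json.Unmarshal(data, &state); err != nil {
		return err
	}
	apply(state.MaxVolumeId) // the real topology only adjusts the id upward
	return nil
}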
+ if err := os.MkdirAll(path.Join(s.dataDir, "snapshot"), 0600); err != nil { + return nil, err + } - s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, nil, topo, "") + stateMachine := StateMachine{topo: topo} + s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, stateMachine, topo, "") if err != nil { glog.V(0).Infoln(err) - return nil + return nil, err + } + s.raftServer.SetHeartbeatInterval(time.Duration(300+rand.Intn(150)) * time.Millisecond) + s.raftServer.SetElectionTimeout(10 * time.Second) + if err := s.raftServer.LoadSnapshot(); err != nil { + return nil, err + } + if err := s.raftServer.Start(); err != nil { + return nil, err } - s.raftServer.SetHeartbeatInterval(500 * time.Millisecond) - s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 500 * time.Millisecond) - s.raftServer.Start() for _, peer := range s.peers { - s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer)) + if err := s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer)); err != nil { + return nil, err + } + } + + // Remove deleted peers + for existsPeerName := range s.raftServer.Peers() { + exists, existingPeer := false, "" + for _, peer := range s.peers { + if pb.ServerToGrpcAddress(peer) == existsPeerName { + exists, existingPeer = true, peer + break + } + } + if exists { + if err := s.raftServer.RemovePeer(existsPeerName); err != nil { + glog.V(0).Infoln(err) + return nil, err + } else { + glog.V(0).Infof("removing old peer %s", existingPeer) + } + } } s.GrpcServer = raft.NewGrpcServer(s.raftServer) if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) { // Initialize the server by joining itself. - glog.V(0).Infoln("Initializing new cluster") - - _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ - Name: s.raftServer.Name(), - ConnectionString: util.ServerToGrpcAddress(s.serverAddr), - }) - - if err != nil { - glog.V(0).Infoln(err) - return nil - } + // s.DoJoinCommand() } glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader()) - return s + return s, nil } func (s *RaftServer) Peers() (members []string) { @@ -96,34 +141,6 @@ func (s *RaftServer) Peers() (members []string) { return } -func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, changed bool) { - confPath := path.Join(dir, "conf") - // open conf file - b, err := ioutil.ReadFile(confPath) - if err != nil { - return oldPeers, true - } - conf := &raft.Config{} - if err = json.Unmarshal(b, conf); err != nil { - return oldPeers, true - } - - for _, p := range conf.Peers { - oldPeers = append(oldPeers, p.Name) - } - oldPeers = append(oldPeers, self) - - if len(peers) == 0 && len(oldPeers) <= 1 { - return oldPeers, false - } - - sort.Strings(peers) - sort.Strings(oldPeers) - - return oldPeers, !reflect.DeepEqual(peers, oldPeers) - -} - func isTheFirstOne(self string, peers []string) bool { sort.Strings(peers) if len(peers) <= 0 { @@ -131,3 +148,16 @@ func isTheFirstOne(self string, peers []string) bool { } return self == peers[0] } + +func (s *RaftServer) DoJoinCommand() { + + glog.V(0).Infoln("Initializing new cluster") + + if _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ + Name: s.raftServer.Name(), + ConnectionString: pb.ServerToGrpcAddress(s.serverAddr), + }); err != nil { + glog.Errorf("fail to send join command: %v", err) + } + +} diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go index fd38cb977..252570eab 100644 --- a/weed/server/raft_server_handlers.go +++ b/weed/server/raft_server_handlers.go 
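The fixed 500ms heartbeat above is replaced by a jittered 300-450ms interval plus a long 10s election timeout; desynchronizing peers this way makes simultaneous election attempts after a cluster restart less likely. The jitter in isolation:

import (
	"math/rand"
	"time"
)

// 300ms base plus up to 149ms of jitter, matching NewRaftServer above
func heartbeatInterval() time.Duration {
	return time.Duration(300+rand.Intn(150)) * time.Millisecond
}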
@@ -1,20 +1,24 @@ package weed_server import ( + "github.com/chrislusf/seaweedfs/weed/storage/needle" "net/http" ) type ClusterStatusResult struct { - IsLeader bool `json:"IsLeader,omitempty"` - Leader string `json:"Leader,omitempty"` - Peers []string `json:"Peers,omitempty"` + IsLeader bool `json:"IsLeader,omitempty"` + Leader string `json:"Leader,omitempty"` + Peers []string `json:"Peers,omitempty"` + MaxVolumeId needle.VolumeId `json:"MaxVolumeId,omitempty"` } func (s *RaftServer) StatusHandler(w http.ResponseWriter, r *http.Request) { ret := ClusterStatusResult{ - IsLeader: s.topo.IsLeader(), - Peers: s.Peers(), + IsLeader: s.topo.IsLeader(), + Peers: s.Peers(), + MaxVolumeId: s.topo.GetMaxVolumeId(), } + if leader, e := s.topo.Leader(); e == nil { ret.Leader = leader } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index c631d2535..2bc108a23 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -2,10 +2,15 @@ package weed_server import ( "context" + "fmt" + "path/filepath" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" ) func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) { @@ -36,6 +41,7 @@ func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_p req.Ttl, req.Preallocate, req.MemoryMapMaxSizeMb, + types.ToDiskType(req.DiskType), ) if err != nil { @@ -96,6 +102,41 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. 
} +func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) { + + resp := &volume_server_pb.VolumeConfigureResponse{} + + // check replication format + if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil { + resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err) + return resp, nil + } + + // unmount + if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure unmount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err) + return resp, nil + } + + // modify the volume info file + if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil { + glog.Errorf("volume configure %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure %v: %v", req, err) + return resp, nil + } + + // mount + if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure mount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err) + return resp, nil + } + + return resp, nil + +} + func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) { resp := &volume_server_pb.VolumeMarkReadonlyResponse{} @@ -109,5 +150,100 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv } return resp, err +} + +func (vs *VolumeServer) VolumeMarkWritable(ctx context.Context, req *volume_server_pb.VolumeMarkWritableRequest) (*volume_server_pb.VolumeMarkWritableResponse, error) { + + resp := &volume_server_pb.VolumeMarkWritableResponse{} + + err := vs.store.MarkVolumeWritable(needle.VolumeId(req.VolumeId)) + + if err != nil { + glog.Errorf("volume mark writable %v: %v", req, err) + } else { + glog.V(2).Infof("volume mark writable %v", req) + } + + return resp, err +} + +func (vs *VolumeServer) VolumeStatus(ctx context.Context, req *volume_server_pb.VolumeStatusRequest) (*volume_server_pb.VolumeStatusResponse, error) { + + resp := &volume_server_pb.VolumeStatusResponse{} + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp.IsReadOnly = v.IsReadOnly() + + return resp, nil +} + +func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) { + + resp := &volume_server_pb.VolumeServerStatusResponse{} + + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir)) + } + } + + resp.MemoryStatus = stats.MemStat() + + return resp, nil + +} + +func (vs *VolumeServer) VolumeServerLeave(ctx context.Context, req *volume_server_pb.VolumeServerLeaveRequest) (*volume_server_pb.VolumeServerLeaveResponse, error) { + + resp := &volume_server_pb.VolumeServerLeaveResponse{} + + vs.StopHeartbeat() + + return resp, nil + +} + +func (vs *VolumeServer) VolumeNeedleStatus(ctx context.Context, req *volume_server_pb.VolumeNeedleStatusRequest) (*volume_server_pb.VolumeNeedleStatusResponse, error) { + + resp := &volume_server_pb.VolumeNeedleStatusResponse{} + + volumeId := needle.VolumeId(req.VolumeId) + + n := &needle.Needle{ + Id: types.NeedleId(req.NeedleId), + } + + 
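VolumeConfigure above runs an unmount, rewrite-metadata, remount sequence and reports each failure in resp.Error with a nil transport error, so the caller receives the message in the payload rather than as a gRPC status. A sketch of that shape, with the three steps abstracted as callbacks:

import "fmt"

type configureResponse struct {
	Error string
}

func configureVolume(unmount, rewrite, mount func() error) (*configureResponse, error) {
	resp := &configureResponse{}
	// each step aborts the sequence; the error rides in the response
	// body, not the gRPC status, mirroring VolumeConfigure above
	if err := unmount(); err != nil {
		resp.Error = fmt.Sprintf("unmount: %v", err)
		return resp, nil
	}
	if err := rewrite(); err != nil {
		resp.Error = fmt.Sprintf("configure: %v", err)
		return resp, nil
	}
	if err := mount(); err != nil {
		resp.Error = fmt.Sprintf("mount: %v", err)
		return resp, nil
	}
	return resp, nil
}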
var count int + var err error + hasVolume := vs.store.HasVolume(volumeId) + if !hasVolume { + _, hasEcVolume := vs.store.FindEcVolume(volumeId) + if !hasEcVolume { + return nil, fmt.Errorf("volume not found %d", req.VolumeId) + } + count, err = vs.store.ReadEcShardNeedle(volumeId, n) + } else { + count, err = vs.store.ReadVolumeNeedle(volumeId, n, nil) + } + if err != nil { + return nil, err + } + if count < 0 { + return nil, fmt.Errorf("needle not found %d", n.Id) + } + + resp.NeedleId = uint64(n.Id) + resp.Cookie = uint32(n.Cookie) + resp.Size = uint32(n.Size) + resp.LastModified = n.LastModified + resp.Crc = n.Checksum.Value() + if n.HasTtl() { + resp.Ttl = n.Ttl.String() + } + return resp, nil } diff --git a/weed/server/volume_grpc_batch_delete.go b/weed/server/volume_grpc_batch_delete.go index fdb7937d2..8e84dc2a8 100644 --- a/weed/server/volume_grpc_batch_delete.go +++ b/weed/server/volume_grpc_batch_delete.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" ) func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) { @@ -28,16 +29,34 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B n := new(needle.Needle) volumeId, _ := needle.NewVolumeId(vid) - n.ParsePath(id_cookie) - - cookie := n.Cookie - if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: http.StatusNotFound, - Error: err.Error(), - }) - continue + if req.SkipCookieCheck { + n.Id, err = types.ParseNeedleId(id_cookie) + if err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusBadRequest, + Error: err.Error()}) + continue + } + } else { + n.ParsePath(id_cookie) + cookie := n.Cookie + if _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil); err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusNotFound, + Error: err.Error(), + }) + continue + } + if n.Cookie != cookie { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusBadRequest, + Error: "File Random Cookie does not match.", + }) + break + } } if n.IsChunkedManifest() { @@ -49,14 +68,6 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B continue } - if n.Cookie != cookie { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: http.StatusBadRequest, - Error: "File Random Cookie does not match.", - }) - break - } n.LastModified = now if size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil { resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ @@ -68,7 +79,7 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ FileId: fid, Status: http.StatusAccepted, - Size: size}, + Size: uint32(size)}, ) } } diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 6038752d2..f8875169f 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -2,59 +2,104 @@ package weed_server import ( "fmt" - "net" 
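The BatchDelete rework above keeps the cookie comparison unless the caller explicitly sets SkipCookieCheck: the cookie is the random half of a file id, so requiring a match blocks deletions that merely enumerate needle ids. A sketch of the gate, assuming the 32-bit cookie width that the needle status response above also uses:

import "fmt"

func authorizeDelete(storedCookie, requestCookie uint32, skipCheck bool) error {
	if skipCheck {
		// presumably reserved for trusted internal callers that
		// delete by needle id alone
		return nil
	}
	if storedCookie != requestCookie {
		return fmt.Errorf("file random cookie does not match")
	}
	return nil
}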
+ "github.com/chrislusf/seaweedfs/weed/operation" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" - "github.com/spf13/viper" - "google.golang.org/grpc" + + "golang.org/x/net/context" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" - "golang.org/x/net/context" ) func (vs *VolumeServer) GetMaster() string { return vs.currentMaster } + +func (vs *VolumeServer) checkWithMaster() (err error) { + isConnected := false + for !isConnected { + for _, master := range vs.SeedMasterNodes { + err = operation.WithMasterServerClient(master, vs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", master, err) + } + vs.metricsAddress, vs.metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) + backend.LoadFromPbStorageBackends(resp.StorageBackends) + return nil + }) + if err == nil { + return + } else { + glog.V(0).Infof("checkWithMaster %s: %v", master, err) + } + } + time.Sleep(1790 * time.Millisecond) + } + return +} + func (vs *VolumeServer) heartbeat() { glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes) vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume") var err error var newLeader string - for { + for vs.isHeartbeating { for _, master := range vs.SeedMasterNodes { if newLeader != "" { + // the new leader may actually is the same master + // need to wait a bit before adding itself + time.Sleep(3 * time.Second) master = newLeader } - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master) if parseErr != nil { glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr) continue } vs.store.MasterAddress = master - newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) + newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) newLeader = "" vs.store.MasterAddress = "" } + if !vs.isHeartbeating { + break + } } } } -func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) StopHeartbeat() (isAlreadyStopping bool) { + if !vs.isHeartbeating { + return true + } + vs.isHeartbeating = false + close(vs.stopChan) + return false +} + +func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) + grpcConection, err := pb.GrpcDial(ctx, masterGrpcAddress, 
grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } @@ -78,22 +123,20 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA doneChan <- err return } - if in.GetVolumeSizeLimit() != 0 { + if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() { vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit()) - } - if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) { - glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode) + if vs.store.MaybeAdjustVolumeMax() { + if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", vs.currentMaster, err) + } + } + } + if in.GetLeader() != "" && vs.currentMaster != in.GetLeader() { + glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), vs.currentMaster) newLeader = in.GetLeader() doneChan <- nil return } - if in.GetMetricsAddress() != "" && vs.MetricsAddress != in.GetMetricsAddress() { - vs.MetricsAddress = in.GetMetricsAddress() - vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds()) - } - if len(in.StorageBackends) > 0 { - backend.LoadFromPbStorageBackends(in.StorageBackends) - } } }() @@ -160,6 +203,7 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA } case <-volumeTickChan: glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port) + vs.store.MaybeAdjustVolumeMax() if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) return "", err @@ -172,19 +216,24 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA } case err = <-doneChan: return + case <-vs.stopChan: + var volumeMessages []*master_pb.VolumeInformationMessage + emptyBeat := &master_pb.Heartbeat{ + Ip: vs.store.Ip, + Port: uint32(vs.store.Port), + PublicUrl: vs.store.PublicUrl, + MaxFileKey: uint64(0), + DataCenter: vs.store.GetDataCenter(), + Rack: vs.store.GetRack(), + Volumes: volumeMessages, + HasNoVolumes: len(volumeMessages) == 0, + } + glog.V(1).Infof("volume server %s:%d stops and deletes all volumes", vs.store.Ip, vs.store.Port) + if err = stream.Send(emptyBeat); err != nil { + glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) + return "", err + } + return } } } - -func isSameIP(ip string, host string) bool { - ips, err := net.LookupIP(host) - if err != nil { - return false - } - for _, t := range ips { - if ip == t.String() { - return true - } - } - return false -} diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index a54a1e343..cf9f9f777 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -3,10 +3,11 @@ package weed_server import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "io" + "io/ioutil" "math" "os" - "path" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -20,17 +21,20 @@ import ( const BufferSizeLimit = 1024 * 1024 * 2 -// VolumeCopy copy the .idx .dat files, and mount the volume +// VolumeCopy copy the .idx .dat .vif files, and mount the volume func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) { v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) if v != nil { - 
return nil, fmt.Errorf("volume %d already exists", req.VolumeId) - } - location := vs.store.FindFreeLocation() - if location == nil { - return nil, fmt.Errorf("no space left") + glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId) + + err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err) + } + + glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId) } // the master will not start compaction for read-only volumes, so it is safe to just copy files directly @@ -40,10 +44,10 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // send .dat file // confirm size and timestamp var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse - var volumeFileName, idxFileName, datFileName string + var dataBaseFileName, indexBaseFileName, idxFileName, datFileName string err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { var err error - volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, + volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(), &volume_server_pb.ReadVolumeFileStatusRequest{ VolumeId: req.VolumeId, }) @@ -51,33 +55,55 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo return fmt.Errorf("read volume file status failed, %v", err) } - volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId)) + diskType := volFileInfoResp.DiskType + if req.DiskType != "" { + diskType = req.DiskType + } + location := vs.store.FindFreeLocation(types.ToDiskType(diskType)) + if location == nil { + return fmt.Errorf("no space left for disk type %s", types.ToDiskType(diskType).ReadableString()) + } + + dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId)) + indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId)) + + ioutil.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755) // println("source:", volFileInfoResp.String()) - // copy ecx file - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".dat", false, true); err != nil { + return err + } + + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, indexBaseFileName, ".idx", false, false); err != nil { return err } - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".vif", false, true); err != nil { return err } + os.Remove(dataBaseFileName + ".note") + return nil }) - idxFileName = volumeFileName + ".idx" - datFileName = volumeFileName + ".dat" + if err != nil { + return nil, err + } + if dataBaseFileName == "" { + return nil, fmt.Errorf("not found volume %d file", 
req.VolumeId) + } + + idxFileName = indexBaseFileName + ".idx" + datFileName = dataBaseFileName + ".dat" - if err != nil && volumeFileName != "" { - if idxFileName != "" { + defer func() { + if err != nil && dataBaseFileName != "" { os.Remove(idxFileName) - } - if datFileName != "" { os.Remove(datFileName) + os.Remove(dataBaseFileName + ".vif") } - return nil, err - } + }() if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16 return nil, err @@ -94,10 +120,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo }, err } -func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32, - compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool, ignoreSourceFileNotFound bool) error { +func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error { - copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ + copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{ VolumeId: vid, Ext: ext, CompactionRevision: compactRevision, @@ -186,6 +211,7 @@ func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_se resp.FileCount = v.FileCount() resp.CompactionRevision = uint32(v.CompactionRevision) resp.Collection = v.Collection + resp.DiskType = string(v.DiskType()) return resp, nil } @@ -204,11 +230,15 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 { return fmt.Errorf("volume %d is compacted", req.VolumeId) } - fileName = v.FileName() + req.Ext + fileName = v.FileName(req.Ext) } else { baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext for _, location := range vs.store.Locations { - tName := path.Join(location.Directory, baseFileName) + tName := util.Join(location.Directory, baseFileName) + if util.FileExists(tName) { + fileName = tName + } + tName = util.Join(location.IdxDirectory, baseFileName) if util.FileExists(tName) { fileName = tName } diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go index 6d6c3daa3..82b143c3d 100644 --- a/weed/server/volume_grpc_copy_incremental.go +++ b/weed/server/volume_grpc_copy_incremental.go @@ -27,7 +27,7 @@ func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrem return nil } - startOffset := foundOffset.ToAcutalOffset() + startOffset := foundOffset.ToActualOffset() buf := make([]byte, 1024*1024*2) return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream) diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 4bca9948e..452c2766e 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -8,7 +8,6 @@ import ( "math" "os" "path" - "path/filepath" "strings" "github.com/chrislusf/seaweedfs/weed/glog" @@ -27,7 +26,7 @@ import ( Steps to apply erasure coding to .dat .idx files 0. ensure the volume is readonly 1. client call VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files -2. 
client ask master for possible servers to hold the ec files, at least 4 servers +2. client ask master for possible servers to hold the ec files 3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server 4. target servers report the new ec files to the master 5. master stores vid -> [14]*DataNode @@ -38,26 +37,28 @@ Steps to apply erasure coding to .dat .idx files // VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) { + glog.V(0).Infof("VolumeEcShardsGenerate: %v", req) + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) if v == nil { return nil, fmt.Errorf("volume %d not found", req.VolumeId) } - baseFileName := v.FileName() + baseFileName := v.DataFileName() if v.Collection != req.Collection { return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) } - // write .ecx file - if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil { - return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err) - } - // write .ec00 ~ .ec13 files if err := erasure_coding.WriteEcFiles(baseFileName); err != nil { return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) } + // write .ecx file + if err := erasure_coding.WriteSortedFileFromIdx(v.IndexFileName(), ".ecx"); err != nil { + return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", v.IndexFileName(), err) + } + // write .vif files if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil { return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) @@ -69,22 +70,25 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_ // VolumeEcShardsRebuild generates the any of the missing .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) { + glog.V(0).Infof("VolumeEcShardsRebuild: %v", req) + baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) var rebuiltShardIds []uint32 for _, location := range vs.store.Locations { - if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) { + if util.FileExists(path.Join(location.IdxDirectory, baseFileName+".ecx")) { // write .ec00 ~ .ec13 files - baseFileName = path.Join(location.Directory, baseFileName) - if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil { - return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err) + dataBaseFileName := path.Join(location.Directory, baseFileName) + if generatedShardIds, err := erasure_coding.RebuildEcFiles(dataBaseFileName); err != nil { + return nil, fmt.Errorf("RebuildEcFiles %s: %v", dataBaseFileName, err) } else { rebuiltShardIds = generatedShardIds } - if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil { - return nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err) + indexBaseFileName := path.Join(location.IdxDirectory, baseFileName) + if err := erasure_coding.RebuildEcxFile(indexBaseFileName); err != nil { + return nil, fmt.Errorf("RebuildEcxFile %s: %v", dataBaseFileName, err) } break @@ -99,18 +103,21 @@ func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_s // VolumeEcShardsCopy copy the 
.ecx and some ec data slices func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) { - location := vs.store.FindFreeLocation() + glog.V(0).Infof("VolumeEcShardsCopy: %v", req) + + location := vs.store.FindFreeLocation(types.HardDriveType) if location == nil { return nil, fmt.Errorf("no space left") } - baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) + dataBaseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) + indexBaseFileName := storage.VolumeFileName(location.IdxDirectory, req.Collection, int(req.VolumeId)) err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy ec data slices for _, shardId := range req.ShardIds { - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { return err } } @@ -118,7 +125,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv if req.CopyEcxFile { // copy ecx file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecx", false, false); err != nil { return err } return nil @@ -126,14 +133,14 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv if req.CopyEcjFile { // copy ecj file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecj", true, true); err != nil { return err } } if req.CopyVifFile { // copy vif file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, ".vif", false, true); err != nil { return err } } @@ -151,17 +158,19 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv // the shard should not be mounted before calling this. 
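The copy logic above splits index files (.ecx, .ecj) and data shards onto separate base names because they may live in different directories. For context on the shard counts in this file's comments (.ec00 ~ .ec13, vid -> [14]*DataNode, and the .dat rebuild from .ec00 ~ .ec09 below): the volume is cut into 10 data shards plus 4 parity shards, and any 10 of the 14 reconstruct the data. A small sketch of that arithmetic:

const (
	dataShards   = 10 // .ec00 ~ .ec09 carry the volume's data
	parityShards = 4  // .ec10 ~ .ec13 carry Reed-Solomon parity
	totalShards  = dataShards + parityShards
)

// any dataShards of the totalShards suffice to rebuild the volume,
// so up to parityShards shard losses are survivable
func canRebuild(availableShards int) bool {
	return availableShards >= dataShards
}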
func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) { - baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + bName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds) found := false + var indexBaseFilename, dataBaseFilename string for _, location := range vs.store.Locations { - if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) { + if util.FileExists(path.Join(location.IdxDirectory, bName+".ecx")) { found = true - baseFilename = path.Join(location.Directory, baseFilename) + indexBaseFilename = path.Join(location.IdxDirectory, bName) + dataBaseFilename = path.Join(location.Directory, bName) for _, shardId := range req.ShardIds { - os.Remove(baseFilename + erasure_coding.ToExt(int(shardId))) + os.Remove(dataBaseFilename + erasure_coding.ToExt(int(shardId))) } break } @@ -173,19 +182,30 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se // check whether to delete the .ecx and .ecj file also hasEcxFile := false + hasIdxFile := false existingShardCount := 0 - bName := filepath.Base(baseFilename) for _, location := range vs.store.Locations { fileInfos, err := ioutil.ReadDir(location.Directory) if err != nil { continue } + if location.IdxDirectory != location.Directory { + idxFileInfos, err := ioutil.ReadDir(location.IdxDirectory) + if err != nil { + continue + } + fileInfos = append(fileInfos, idxFileInfos...) + } for _, fileInfo := range fileInfos { if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" { hasEcxFile = true continue } + if fileInfo.Name() == bName+".idx" { + hasIdxFile = true + continue + } if strings.HasPrefix(fileInfo.Name(), bName+".ec") { existingShardCount++ } @@ -193,12 +213,14 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se } if hasEcxFile && existingShardCount == 0 { - if err := os.Remove(baseFilename + ".ecx"); err != nil { - return nil, err - } - if err := os.Remove(baseFilename + ".ecj"); err != nil { + if err := os.Remove(indexBaseFilename + ".ecx"); err != nil { return nil, err } + os.Remove(indexBaseFilename + ".ecj") + } + if !hasIdxFile { + // .vif is used for ec volumes and normal volumes + os.Remove(dataBaseFilename + ".vif") } return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil @@ -206,6 +228,8 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) { + glog.V(0).Infof("VolumeEcShardsMount: %v", req) + for _, shardId := range req.ShardIds { err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) @@ -225,6 +249,8 @@ func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_ser func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) { + glog.V(0).Infof("VolumeEcShardsUnmount: %v", req) + for _, shardId := range req.ShardIds { err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) @@ -255,7 +281,7 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea 
if req.FileKey != 0 { _, size, _ := ecVolume.FindNeedleFromEcx(types.Uint64ToNeedleId(req.FileKey)) - if size == types.TombstoneFileSize { + if size.IsDeleted() { return stream.Send(&volume_server_pb.VolumeEcShardReadResponse{ IsDeleted: true, }) @@ -312,6 +338,8 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) { + glog.V(0).Infof("VolumeEcBlobDelete: %v", req) + resp := &volume_server_pb.VolumeEcBlobDeleteResponse{} for _, location := range vs.store.Locations { @@ -321,7 +349,7 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv if err != nil { return nil, fmt.Errorf("locate in local ec volume: %v", err) } - if size == types.TombstoneFileSize { + if size.IsDeleted() { return resp, nil } @@ -340,30 +368,32 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv // VolumeEcShardsToVolume generates the .idx, .dat files from .ecx, .ecj and .ec01 ~ .ec14 files func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) { + glog.V(0).Infof("VolumeEcShardsToVolume: %v", req) + v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId)) if !found { return nil, fmt.Errorf("ec volume %d not found", req.VolumeId) } - baseFileName := v.FileName() if v.Collection != req.Collection { return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) } + dataBaseFileName, indexBaseFileName := v.DataBaseFileName(), v.IndexBaseFileName() // calculate .dat file size - datFileSize, err := erasure_coding.FindDatFileSize(baseFileName) + datFileSize, err := erasure_coding.FindDatFileSize(dataBaseFileName, indexBaseFileName) if err != nil { - return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err) + return nil, fmt.Errorf("FindDatFileSize %s: %v", dataBaseFileName, err) } // write .dat file from .ec00 ~ .ec09 files - if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil { - return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + if err := erasure_coding.WriteDatFile(dataBaseFileName, datFileSize); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", dataBaseFileName, err) } // write .idx file from .ecx and .ecj files - if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil { - return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err) + if err := erasure_coding.WriteIdxFileFromEcIndex(indexBaseFileName); err != nil { + return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", v.IndexBaseFileName(), err) } return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil diff --git a/weed/server/volume_grpc_query.go b/weed/server/volume_grpc_query.go index 767e28e7b..2f4fab96a 100644 --- a/weed/server/volume_grpc_query.go +++ b/weed/server/volume_grpc_query.go @@ -24,7 +24,7 @@ func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_ n.ParsePath(id_cookie) cookie := n.Cookie - if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil { + if _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil); err != nil { glog.V(0).Infof("volume query failed to read fid %s: %v", fid, err) return err } diff --git a/weed/server/volume_grpc_read_write.go b/weed/server/volume_grpc_read_write.go new file 
mode 100644 index 000000000..988e9e145 --- /dev/null +++ b/weed/server/volume_grpc_read_write.go @@ -0,0 +1,38 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func (vs *VolumeServer) ReadNeedleBlob(ctx context.Context, req *volume_server_pb.ReadNeedleBlobRequest) (resp *volume_server_pb.ReadNeedleBlobResponse, err error) { + resp = &volume_server_pb.ReadNeedleBlobResponse{} + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp.NeedleBlob, err = v.ReadNeedleBlob(req.Offset, types.Size(req.Size)) + if err != nil { + return nil, fmt.Errorf("read needle blob offset %d size %d: %v", req.Offset, req.Size, err) + } + + return resp, nil +} + +func (vs *VolumeServer) WriteNeedleBlob(ctx context.Context, req *volume_server_pb.WriteNeedleBlobRequest) (resp *volume_server_pb.WriteNeedleBlobResponse, err error) { + resp = &volume_server_pb.WriteNeedleBlobResponse{} + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + if err = v.WriteNeedleBlob(types.NeedleId(req.NeedleId), req.NeedleBlob, types.Size(req.Size)); err != nil { + return nil, fmt.Errorf("write blob needle %d size %d: %v", req.NeedleId, req.Size, err) + } + + return resp, nil +} diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go index c26d6ed8f..6c039ebf5 100644 --- a/weed/server/volume_grpc_tail.go +++ b/weed/server/volume_grpc_tail.go @@ -72,7 +72,7 @@ func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServe stream: stream, } - err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToAcutalOffset(), scanner) + err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToActualOffset(), scanner) return scanner.lastProcessedTimestampNs, err @@ -90,7 +90,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv defer glog.V(1).Infof("receive tailing volume %d finished", v.Id) return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error { - _, _, err := vs.store.WriteVolumeNeedle(v.Id, n) + _, err := vs.store.WriteVolumeNeedle(v.Id, n, false) return err }) diff --git a/weed/server/volume_grpc_tier_download.go b/weed/server/volume_grpc_tier_download.go index 7b3982e40..73d8ae7cb 100644 --- a/weed/server/volume_grpc_tier_download.go +++ b/weed/server/volume_grpc_tier_download.go @@ -58,9 +58,9 @@ func (vs *VolumeServer) VolumeTierMoveDatFromRemote(req *volume_server_pb.Volume }) } // copy the data file - _, err := backendStorage.DownloadFile(v.FileName()+".dat", storageKey, fn) + _, err := backendStorage.DownloadFile(v.FileName(".dat"), storageKey, fn) if err != nil { - return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName()+".dat", err) + return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName(".dat"), err) } if req.KeepRemoteDatFile { diff --git a/weed/server/volume_grpc_tier_upload.go b/weed/server/volume_grpc_tier_upload.go index c9694df59..e51de5f1d 100644 --- a/weed/server/volume_grpc_tier_upload.go +++ b/weed/server/volume_grpc_tier_upload.go @@ -93,7 +93,7 @@ func (vs *VolumeServer) VolumeTierMoveDatToRemote(req 
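A hedged client-side sketch of the new `ReadNeedleBlob` endpoint, assuming the generated `volume_server_pb` stubs and a volume server whose gRPC port happens to be reachable at localhost:18080; the offset and size would normally come from a needle index (`.idx`/`.ecx`) rather than the placeholders used here:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	// Plain-text connection for a local dev setup; production would use TLS.
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)

	// Field names follow the handler above; the values are placeholders.
	resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: 3,
		Offset:   0,
		Size:     1024,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes of raw needle blob\n", len(resp.NeedleBlob))
}
```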
*volume_server_pb.VolumeTi } if !req.KeepLocalDatFile { - os.Remove(v.FileName() + ".dat") + os.Remove(v.FileName(".dat")) } return nil diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go index 24f982241..b87de4b5b 100644 --- a/weed/server/volume_grpc_vacuum.go +++ b/weed/server/volume_grpc_vacuum.go @@ -51,6 +51,11 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv } else { glog.V(1).Infof("commit volume %d", req.VolumeId) } + if err == nil { + if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() { + resp.IsReadOnly = true + } + } return resp, err diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 6cf654738..e11d607a4 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -2,15 +2,18 @@ package weed_server import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "net/http" + "sync" - "github.com/chrislusf/seaweedfs/weed/stats" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/spf13/viper" ) type VolumeServer struct { @@ -23,27 +26,37 @@ type VolumeServer struct { guard *security.Guard grpcDialOption grpc.DialOption - needleMapKind storage.NeedleMapType + needleMapKind storage.NeedleMapKind FixJpgOrientation bool ReadRedirect bool compactionBytePerSecond int64 - MetricsAddress string - MetricsIntervalSec int + metricsAddress string + metricsIntervalSec int + fileSizeLimitBytes int64 + isHeartbeating bool + stopChan chan bool + + inFlightDataSize int64 + inFlightDataLimitCond *sync.Cond + concurrentUploadLimit int64 } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, port int, publicUrl string, - folders []string, maxCounts []int, - needleMapKind storage.NeedleMapType, + folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []types.DiskType, + idxFolder string, + needleMapKind storage.NeedleMapKind, masterNodes []string, pulseSeconds int, dataCenter string, rack string, whiteList []string, fixJpgOrientation bool, readRedirect bool, compactionMBPerSecond int, + fileSizeLimitMB int, + concurrentUploadLimit int64, ) *VolumeServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -60,22 +73,31 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, needleMapKind: needleMapKind, FixJpgOrientation: fixJpgOrientation, ReadRedirect: readRedirect, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"), compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, + fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, + isHeartbeating: true, + stopChan: make(chan bool), + inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)), + concurrentUploadLimit: concurrentUploadLimit, } vs.SeedMasterNodes = masterNodes - vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) + vs.checkWithMaster() + + vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, idxFolder, vs.needleMapKind, diskTypes) vs.guard = security.NewGuard(whiteList, 
signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) handleStaticResources(adminMux) + adminMux.HandleFunc("/status", vs.statusHandler) if signingKey == "" || enableUiAccess { // only expose the volume server details for safe environments adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) - adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) - adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) - adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) - adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + /* + adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) + adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) + adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + */ } adminMux.HandleFunc("/", vs.privateStoreHandler) if publicMux != adminMux { @@ -85,11 +107,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, } go vs.heartbeat() - hostAddress := fmt.Sprintf("%s:%d", ip, port) - go stats.LoopPushingMetric("volumeServer", hostAddress, stats.VolumeServerGather, - func() (addr string, intervalSeconds int) { - return vs.MetricsAddress, vs.MetricsIntervalSec - }) + go stats.LoopPushingMetric("volumeServer", fmt.Sprintf("%s:%d", ip, port), vs.metricsAddress, vs.metricsIntervalSec) return vs } diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 14ad27d42..4527add44 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -2,7 +2,11 @@ package weed_server import ( "net/http" + "strconv" "strings" + "sync/atomic" + + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -25,6 +29,11 @@ security settings: */ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } switch r.Method { case "GET", "HEAD": stats.ReadRequest() @@ -33,12 +42,49 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque stats.DeleteRequest() vs.guard.WhiteList(vs.DeleteHandler)(w, r) case "PUT", "POST": + + // wait until in flight data is less than the limit + contentLength := getContentLength(r) + vs.inFlightDataLimitCond.L.Lock() + for atomic.LoadInt64(&vs.inFlightDataSize) > vs.concurrentUploadLimit { + vs.inFlightDataLimitCond.Wait() + } + atomic.AddInt64(&vs.inFlightDataSize, contentLength) + vs.inFlightDataLimitCond.L.Unlock() + defer func() { + atomic.AddInt64(&vs.inFlightDataSize, -contentLength) + vs.inFlightDataLimitCond.Signal() + }() + + // process uploads stats.WriteRequest() vs.guard.WhiteList(vs.PostHandler)(w, r) + + case "OPTIONS": + stats.ReadRequest() + w.Header().Add("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") + w.Header().Add("Access-Control-Allow-Headers", "*") } } +func getContentLength(r *http.Request) int64 { + contentLength := r.Header.Get("Content-Length") + if contentLength != "" { + length, err := strconv.ParseInt(contentLength, 10, 64) + if err != nil { + return 0 + } + return length + } + return 0 +} + func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Volume 
"+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } switch r.Method { case "GET": stats.ReadRequest() @@ -46,6 +92,10 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req case "HEAD": stats.ReadRequest() vs.GetOrHeadHandler(w, r) + case "OPTIONS": + stats.ReadRequest() + w.Header().Add("Access-Control-Allow-Methods", "GET, OPTIONS") + w.Header().Add("Access-Control-Allow-Headers", "*") } } diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go index 1938a34c4..7e6c06871 100644 --- a/weed/server/volume_server_handlers_admin.go +++ b/weed/server/volume_server_handlers_admin.go @@ -10,19 +10,32 @@ import ( ) func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION) m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() + var ds []*volume_server_pb.DiskStatus + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + newDiskStatus := stats.NewDiskStatus(dir) + newDiskStatus.DiskType = loc.DiskType.String() + ds = append(ds, newDiskStatus) + } + } + m["DiskStatuses"] = ds m["Volumes"] = vs.store.VolumeInfos() writeJsonQuiet(w, r, http.StatusOK, m) } func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION) m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() var ds []*volume_server_pb.DiskStatus for _, loc := range vs.store.Locations { if dir, e := filepath.Abs(loc.Directory); e == nil { - ds = append(ds, stats.NewDiskStatus(dir)) + newDiskStatus := stats.NewDiskStatus(dir) + newDiskStatus.DiskType = loc.DiskType.String() + ds = append(ds, newDiskStatus) } } m["DiskStatuses"] = ds diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index cd11356b9..3e977cfd4 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -2,33 +2,33 @@ package weed_server import ( "bytes" - "context" + "encoding/json" "errors" "fmt" "io" "mime" - "mime/multipart" "net/http" "net/url" - "path" + "path/filepath" "strconv" "strings" "time" - "encoding/json" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) -var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") +var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`) func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) { + glog.V(9).Info(r.Method + " " + r.URL.Path + " " + r.Header.Get("Range")) + stats.VolumeServerRequestCounter.WithLabelValues("get").Inc() start := time.Now() defer func() { stats.VolumeServerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) }() @@ -43,18 +43,18 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) volumeId, err := needle.NewVolumeId(vid) if err != nil { - glog.V(2).Infoln("parsing error:", err, r.URL.Path) + glog.V(2).Infof("parsing vid %s: %v", r.URL.Path, err) 
w.WriteHeader(http.StatusBadRequest) return } err = n.ParsePath(fid) if err != nil { - glog.V(2).Infoln("parsing fid error:", err, r.URL.Path) + glog.V(2).Infof("parsing fid %s: %v", r.URL.Path, err) w.WriteHeader(http.StatusBadRequest) return } - glog.V(4).Infoln("volume", volumeId, "reading", n) + // glog.V(4).Infoln("volume", volumeId, "reading", n) hasVolume := vs.store.HasVolume(volumeId) _, hasEcVolume := vs.store.FindEcVolume(volumeId) if !hasVolume && !hasEcVolume { @@ -63,7 +63,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusNotFound) return } - lookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String()) + lookupResult, err := operation.Lookup(vs.GetMaster, volumeId.String()) glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err) if err == nil && len(lookupResult.Locations) > 0 { u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl)) @@ -82,15 +82,24 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } cookie := n.Cookie + + readOption := &storage.ReadOption{ + ReadDeleted: r.FormValue("readDeleted") == "true", + } + var count int if hasVolume { - count, err = vs.store.ReadVolumeNeedle(volumeId, n) + count, err = vs.store.ReadVolumeNeedle(volumeId, n, readOption) } else if hasEcVolume { - count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) + count, err = vs.store.ReadEcShardNeedle(volumeId, n) + } + if err != nil && err != storage.ErrorDeleted && r.FormValue("type") != "replicate" && hasVolume { + glog.V(4).Infof("read needle: %v", err) + // start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request } - glog.V(4).Infoln("read bytes", count, "error", err) + // glog.V(4).Infoln("read bytes", count, "error", err) if err != nil || count < 0 { - glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) + glog.V(3).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) w.WriteHeader(http.StatusNotFound) return } @@ -114,11 +123,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusNotModified) return } - if r.Header.Get("ETag-MD5") == "True" { - setEtag(w, n.MD5()) - } else { - setEtag(w, n.Etag()) - } + setEtag(w, n.Etag()) if n.HasPairs() { pairMap := make(map[string]string) @@ -131,14 +136,14 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } - if vs.tryHandleChunkedFile(n, filename, w, r) { + if vs.tryHandleChunkedFile(n, filename, ext, w, r) { return } if n.NameSize > 0 && filename == "" { filename = string(n.Name) if ext == "" { - ext = path.Ext(filename) + ext = filepath.Ext(filename) } } mtype := "" @@ -149,14 +154,18 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } - if ext != ".gz" { - if n.IsGzipped() { - if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { - w.Header().Set("Content-Encoding", "gzip") - } else { - if n.Data, err = util.UnGzipData(n.Data); err != nil { - glog.V(0).Infoln("ungzip error:", err, r.URL.Path) - } + if n.IsCompressed() { + if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize { + if n.Data, err = util.DecompressData(n.Data); err != nil { + glog.V(0).Infoln("ungzip error:", err, r.URL.Path) + } + // } else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) { + // w.Header().Set("Content-Encoding", 
"zstd") + } else if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && util.IsGzippedContent(n.Data) { + w.Header().Set("Content-Encoding", "gzip") + } else { + if n.Data, err = util.DecompressData(n.Data); err != nil { + glog.V(0).Infoln("uncompress error:", err, r.URL.Path) } } } @@ -168,12 +177,12 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } -func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) { +func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, ext string, w http.ResponseWriter, r *http.Request) (processed bool) { if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" { return false } - chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped()) + chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed()) if e != nil { glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e) return false @@ -182,7 +191,9 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, fileName = chunkManifest.Name } - ext := path.Ext(fileName) + if ext == "" { + ext = filepath.Ext(fileName) + } mType := "" if chunkManifest.Mime != "" { @@ -194,10 +205,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w.Header().Set("X-File-Store", "chunked") - chunkedFileReader := &operation.ChunkedFileReader{ - Manifest: chunkManifest, - Master: vs.GetMaster(), - } + chunkedFileReader := operation.NewChunkedFileReader(chunkManifest.Chunks, vs.GetMaster()) defer chunkedFileReader.Close() rs := conditionallyResizeImages(chunkedFileReader, ext, r) @@ -213,129 +221,52 @@ func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext strin if len(ext) > 0 { ext = strings.ToLower(ext) } + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, mode) + } + return rs +} + +func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) { if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" { - width, height := 0, 0 if r.FormValue("width") != "" { width, _ = strconv.Atoi(r.FormValue("width")) } if r.FormValue("height") != "" { height, _ = strconv.Atoi(r.FormValue("height")) } - rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode")) } - return rs + mode = r.FormValue("mode") + shouldResize = width > 0 || height > 0 + return } func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { totalSize, e := rs.Seek(0, 2) if mimeType == "" { - if ext := path.Ext(filename); ext != "" { + if ext := filepath.Ext(filename); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - if filename != "" { - contentDisposition := "inline" - if r.FormValue("dl") != "" { - if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { - contentDisposition = "attachment" - } - } - w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) - } w.Header().Set("Accept-Ranges", "bytes") + + adjustHeaderContentDisposition(w, r, filename) + if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) return nil } - rangeReq := r.Header.Get("Range") - if rangeReq == "" { - 
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if _, e = rs.Seek(0, 0); e != nil { - return e - } - _, e = io.Copy(w, rs) - return e - } - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return nil - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return nil - } - if len(ranges) == 0 { - return nil - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - if _, e = rs.Seek(ra.start, 0); e != nil { + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if _, e = rs.Seek(offset, 0); e != nil { return e } - - _, e = io.CopyN(w, rs, ra.length) + _, e = io.CopyN(writer, rs, size) return e - } - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return nil - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if _, e = rs.Seek(ra.start, 0); e != nil { - pw.CloseWithError(e) - return - } - if _, e = io.CopyN(part, rs, ra.length); e != nil { - pw.CloseWithError(e) - return - } - } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - _, e = io.CopyN(w, sendContent, sendSize) - return e + }) + return nil } diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go index 8d35c9c8b..437e5c45d 100644 --- a/weed/server/volume_server_handlers_ui.go +++ b/weed/server/volume_server_handlers_ui.go @@ -13,12 +13,15 @@ import ( ) func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION) infos := make(map[string]interface{}) infos["Up Time"] = time.Now().Sub(startTime).String() var ds []*volume_server_pb.DiskStatus for _, loc := range vs.store.Locations { if dir, e := filepath.Abs(loc.Directory); e == nil { - ds = append(ds, stats.NewDiskStatus(dir)) + newDiskStatus := stats.NewDiskStatus(dir) + newDiskStatus.DiskType = loc.DiskType.String() + ds = append(ds, newDiskStatus) } } volumeInfos := vs.store.VolumeInfos() @@ -40,7 +43,7 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) Stats interface{} Counters *stats.ServerStats }{ - util.VERSION, + util.Version(), vs.SeedMasterNodes, normalVolumeInfos, vs.store.EcVolumes(), diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 05e21612b..3d752eda6 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -1,7 +1,6 @@ package weed_server import ( - "context" "errors" "fmt" "net/http" @@ -43,18 +42,19 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation) + reqNeedle, originalSize, contentMd5, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation, vs.fileSizeLimitBytes) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) return } ret := operation.UploadResult{} - _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) + isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster, vs.store, volumeId, reqNeedle, r) - // http 304 status code does not allow body + // http 204 status code does not allow body if writeError == nil && isUnchanged { - w.WriteHeader(http.StatusNotModified) + setEtag(w, reqNeedle.Etag()) + w.WriteHeader(http.StatusNoContent) return } @@ -63,12 +63,14 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { httpStatus = http.StatusInternalServerError ret.Error = writeError.Error() } - if needle.HasName() { - ret.Name = string(needle.Name) + if reqNeedle.HasName() { + ret.Name = string(reqNeedle.Name) } ret.Size = uint32(originalSize) - ret.ETag = needle.Etag() + ret.ETag = reqNeedle.Etag() + ret.Mime = string(reqNeedle.Mime) setEtag(w, ret.ETag) + w.Header().Set("Content-MD5", contentMd5) writeJsonQuiet(w, r, httpStatus, ret) } @@ -97,12 +99,12 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { ecVolume, hasEcVolume := 
vs.store.FindEcVolume(volumeId) if hasEcVolume { - count, err := vs.store.DeleteEcShardNeedle(context.Background(), ecVolume, n, cookie) + count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie) writeDeleteResult(err, count, w, r) return } - _, ok := vs.store.ReadVolumeNeedle(volumeId, n) + _, ok := vs.store.ReadVolumeNeedle(volumeId, n, nil) if ok != nil { m := make(map[string]uint32) m["size"] = 0 @@ -119,13 +121,13 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { count := int64(n.Size) if n.IsChunkedManifest() { - chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped()) + chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed()) if e != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Load chunks manifest error: %v", e)) return } // make sure all chunks had deleted before delete manifest - if e := chunkManifest.DeleteChunks(vs.GetMaster(), vs.grpcDialOption); e != nil { + if e := chunkManifest.DeleteChunks(vs.GetMaster, false, vs.grpcDialOption); e != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e)) return } @@ -140,7 +142,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { } } - _, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r) + _, err := topology.ReplicatedDelete(vs.GetMaster, vs.store, volumeId, n, r) writeDeleteResult(err, count, w, r) @@ -165,3 +167,11 @@ func setEtag(w http.ResponseWriter, etag string) { } } } + +func getEtag(resp *http.Response) (etag string) { + etag = resp.Header.Get("ETag") + if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") { + return etag[1 : len(etag)-1] + } + return +} diff --git a/weed/server/volume_server_tcp_handlers_write.go b/weed/server/volume_server_tcp_handlers_write.go new file mode 100644 index 000000000..a009611da --- /dev/null +++ b/weed/server/volume_server_tcp_handlers_write.go @@ -0,0 +1,137 @@ +package weed_server + +import ( + "bufio" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "net" + "strings" +) + +func (vs *VolumeServer) HandleTcpConnection(c net.Conn) { + defer c.Close() + + glog.V(0).Infof("Serving writes from %s", c.RemoteAddr().String()) + + bufReader := bufio.NewReaderSize(c, 1024*1024) + bufWriter := bufio.NewWriterSize(c, 1024*1024) + + for { + cmd, err := bufReader.ReadString('\n') + if err != nil { + if err != io.EOF { + glog.Errorf("read command from %s: %v", c.RemoteAddr().String(), err) + } + return + } + cmd = cmd[:len(cmd)-1] + switch cmd[0] { + case '+': + fileId := cmd[1:] + err = vs.handleTcpPut(fileId, bufReader) + if err == nil { + bufWriter.Write([]byte("+OK\n")) + } else { + bufWriter.Write([]byte("-ERR " + string(err.Error()) + "\n")) + } + case '-': + fileId := cmd[1:] + err = vs.handleTcpDelete(fileId) + if err == nil { + bufWriter.Write([]byte("+OK\n")) + } else { + bufWriter.Write([]byte("-ERR " + string(err.Error()) + "\n")) + } + case '?': + fileId := cmd[1:] + err = vs.handleTcpGet(fileId, bufWriter) + case '!': + bufWriter.Flush() + } + + } + +} + +func (vs *VolumeServer) handleTcpGet(fileId string, writer *bufio.Writer) (err error) { + + volumeId, n, err2 := vs.parseFileId(fileId) + if err2 != nil { + return err2 + } + + volume := vs.store.GetVolume(volumeId) + if volume == nil { + return fmt.Errorf("volume %d not found", volumeId) + } + + err = volume.StreamRead(n, 
writer) + if err != nil { + return err + } + + return nil +} + +func (vs *VolumeServer) handleTcpPut(fileId string, bufReader *bufio.Reader) (err error) { + + volumeId, n, err2 := vs.parseFileId(fileId) + if err2 != nil { + return err2 + } + + volume := vs.store.GetVolume(volumeId) + if volume == nil { + return fmt.Errorf("volume %d not found", volumeId) + } + + sizeBuf := make([]byte, 4) + if _, err = bufReader.Read(sizeBuf); err != nil { + return err + } + dataSize := util.BytesToUint32(sizeBuf) + + err = volume.StreamWrite(n, bufReader, dataSize) + if err != nil { + return err + } + + return nil +} + +func (vs *VolumeServer) handleTcpDelete(fileId string) (err error) { + + volumeId, n, err2 := vs.parseFileId(fileId) + if err2 != nil { + return err2 + } + + _, err = vs.store.DeleteVolumeNeedle(volumeId, n) + if err != nil { + return err + } + + return nil +} + +func (vs *VolumeServer) parseFileId(fileId string) (needle.VolumeId, *needle.Needle, error) { + + commaIndex := strings.LastIndex(fileId, ",") + if commaIndex <= 0 { + return 0, nil, fmt.Errorf("unknown fileId %s", fileId) + } + + vid, fid := fileId[0:commaIndex], fileId[commaIndex+1:] + + volumeId, ve := needle.NewVolumeId(vid) + if ve != nil { + return 0, nil, fmt.Errorf("unknown volume id in fileId %s", fileId) + } + + n := new(needle.Needle) + n.ParsePath(fid) + return volumeId, n, nil +} diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index 81496b1de..ee4c2e31d 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -1,11 +1,17 @@ -package master_ui +package volume_server_ui import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "html/template" "strconv" "strings" ) +func percentFrom(total uint64, part_of uint64) string { + return fmt.Sprintf("%.2f", (float64(part_of)/float64(total))*100) +} + func join(data []int64) string { var ret []string for _, d := range data { @@ -15,7 +21,9 @@ func join(data []int64) string { } var funcMap = template.FuncMap{ - "join": join, + "join": join, + "bytesToHumanReadable": util.BytesToHumanReadable, + "percentFrom": percentFrom, } var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOCTYPE html> @@ -57,13 +65,27 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <div class="row"> <div class="col-sm-6"> <h2>Disk Stats</h2> - <table class="table table-condensed table-striped"> + <table class="table table-striped"> + <thead> + <tr> + <th>Path</th> + <th>Disk</th> + <th>Total</th> + <th>Free</th> + <th>Usage</th> + </tr> + </thead> + <tbody> {{ range .DiskStatuses }} <tr> - <th>{{ .Dir }}</th> - <td>{{ .Free }} Bytes Free</td> + <td>{{ .Dir }}</td> + <td>{{ .DiskType }}</td> + <td>{{ bytesToHumanReadable .All }}</td> + <td>{{ bytesToHumanReadable .Free }}</td> + <td>{{ percentFrom .All .Used}}%</td> </tr> {{ end }} + </tbody> </table> </div> @@ -107,6 +129,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <tr> <th>Id</th> <th>Collection</th> + <th>Disk</th> <th>Data Size</th> <th>Files</th> <th>Trash</th> @@ -119,9 +142,10 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <tr> <td><code>{{ .Id }}</code></td> <td>{{ .Collection }}</td> - <td>{{ .Size }} Bytes</td> + <td>{{ .DiskType }}</td> + <td>{{ bytesToHumanReadable .Size }}</td> <td>{{ .FileCount }}</td> - <td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td> + <td>{{ .DeleteCount }} / 
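The new TCP handler speaks a compact line protocol: `+<fid>` followed by a length-prefixed payload to put, `-<fid>` to delete, `?<fid>` to read, and `!` to flush buffered replies. A client sketch under those assumptions; the listener address is a placeholder (this hunk only adds the handler, not the flag that picks the port), and big-endian is an assumption about `util.BytesToUint32`:

```go
package main

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:20000")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	w := bufio.NewWriter(conn)
	r := bufio.NewReader(conn)
	fid := "3,01637037d6" // hypothetical volumeId,fileKey pair

	// "+<fid>\n" announces a put; a 4-byte length prefix and the payload
	// follow. "!" asks the server to flush its buffered replies.
	payload := []byte("hello")
	fmt.Fprintf(w, "+%s\n", fid)
	size := make([]byte, 4)
	binary.BigEndian.PutUint32(size, uint32(len(payload)))
	w.Write(size)
	w.Write(payload)
	w.WriteString("!\n")
	w.Flush()

	status, err := r.ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(status) // "+OK" on success, "-ERR ..." otherwise
}
```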
{{bytesToHumanReadable .DeletedByteCount}}</td> <td>{{ .Ttl }}</td> <td>{{ .ReadOnly }}</td> </tr> @@ -149,9 +173,9 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <tr> <td><code>{{ .Id }}</code></td> <td>{{ .Collection }}</td> - <td>{{ .Size }} Bytes</td> + <td>{{ bytesToHumanReadable .Size }}</td> <td>{{ .FileCount }}</td> - <td>{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes</td> + <td>{{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}</td> <td>{{ .RemoteStorageName }}</td> <td>{{ .RemoteStorageKey }}</td> </tr> @@ -177,7 +201,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`<!DOC <tr> <td><code>{{ .VolumeId }}</code></td> <td>{{ .Collection }}</td> - <td>{{ .ShardSize }} Bytes</td> + <td>{{ bytesToHumanReadable .ShardSize }}</td> <td>{{ .ShardIdList }}</td> <td>{{ .CreatedAt.Format "02 Jan 06 15:04 -0700" }}</td> </tr> diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index abd0b66eb..c3f68fdee 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -5,21 +5,23 @@ import ( "context" "fmt" "io" + "math" "os" "path" "strings" "time" + "github.com/chrislusf/seaweedfs/weed/util/buffered_writer" "golang.org/x/net/webdav" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" - "github.com/spf13/viper" - - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" ) @@ -31,14 +33,19 @@ type WebDavOption struct { BucketsPath string GrpcDialOption grpc.DialOption Collection string + Replication string + DiskType string Uid uint32 Gid uint32 + Cipher bool + CacheDir string + CacheSizeMB int64 } type WebDavServer struct { option *WebDavOption secret security.SigningKey - filer *filer2.Filer + filer *filer.Filer grpcDialOption grpc.DialOption Handler *webdav.Handler } @@ -49,7 +56,7 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) { ws = &WebDavServer{ option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), Handler: &webdav.Handler{ FileSystem: fs, LockSystem: webdav.NewMemLS(), @@ -64,8 +71,10 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) { type WebDavFileSystem struct { option *WebDavOption secret security.SigningKey - filer *filer2.Filer + filer *filer.Filer grpcDialOption grpc.DialOption + chunkCache *chunk_cache.TieredChunkCache + signature int32 } type FileInfo struct { @@ -89,23 +98,40 @@ type WebDavFile struct { isDirectory bool off int64 entry *filer_pb.Entry - entryViewCache []filer2.VisibleInterval + entryViewCache []filer.VisibleInterval + reader io.ReaderAt + bufWriter *buffered_writer.BufferedWriteCloser + collection string + replication string } func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { + + cacheUniqueId := util.Md5String([]byte("webdav" + option.FilerGrpcAddress + util.Version()))[0:8] + cacheDir := path.Join(option.CacheDir, cacheUniqueId) + + os.MkdirAll(cacheDir, os.FileMode(0755)) + chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024) return &WebDavFileSystem{ - option: option, + option: option, 
+ chunkCache: chunkCache, + signature: util.RandomInt32(), }, nil } -func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&WebDavFileSystem{}) - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { +func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) } +func (fs *WebDavFileSystem) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} func clearName(name string) (string, error) { slashed := strings.HasSuffix(name, "/") @@ -137,8 +163,8 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm return os.ErrExist } - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(fullDirPath).DirAndName() + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + dir, name := util.FullPath(fullDirPath).DirAndName() request := &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ @@ -152,10 +178,11 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm Gid: fs.option.Gid, }, }, + Signatures: []int32{fs.signature}, } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) } @@ -185,9 +212,9 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f fs.removeAll(ctx, fullFilePath) } - dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + dir, name := util.FullPath(fullFilePath).DirAndName() + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ Name: name, @@ -199,10 +226,11 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f Uid: fs.option.Uid, Gid: fs.option.Gid, Collection: fs.option.Collection, - Replication: "000", + Replication: fs.option.Replication, TtlSec: 0, }, }, + Signatures: []int32{fs.signature}, }); err != nil { return fmt.Errorf("create %s: %v", fullFilePath, err) } @@ -215,6 +243,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f fs: fs, name: fullFilePath, isDirectory: false, + bufWriter: buffered_writer.NewBufferedWriteCloser(4 * 1024 * 1024), }, nil } @@ -230,6 +259,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f fs: fs, name: fullFilePath, isDirectory: false, + bufWriter: buffered_writer.NewBufferedWriteCloser(4 * 1024 * 1024), }, nil } @@ -240,34 +270,10 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) return err } - fi, err := fs.stat(ctx, fullFilePath) - if err != nil { - return err - } + dir, name := util.FullPath(fullFilePath).DirAndName() - if fi.IsDir() { - //_, err = fs.db.Exec(`delete from filesystem where fullFilePath like $1 escape '\'`, strings.Replace(fullFilePath, `%`, `\%`, -1)+`%`) - } else { - //_, err = fs.db.Exec(`delete from 
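Each WebDAV instance now derives its chunk-cache directory from a short hash of a role prefix, the filer address, and the build version, so different filers or upgraded binaries never share stale cache entries. A sketch of that derivation, assuming `util.Md5String` is a hex MD5 helper; the base cache path here is a placeholder for `option.CacheDir`:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"os"
	"path"
)

func main() {
	filerGrpcAddress, version := "localhost:18888", "30GB 2.40"

	// Mirrors the cacheUniqueId derivation above: hex MD5 of role prefix,
	// filer address, and version, truncated to 8 characters.
	sum := md5.Sum([]byte("webdav" + filerGrpcAddress + version))
	cacheUniqueId := fmt.Sprintf("%x", sum)[0:8]

	cacheDir := path.Join("/tmp/seaweed-cache", cacheUniqueId)
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("chunk cache at", cacheDir)
}
```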
filesystem where fullFilePath = ?`, fullFilePath) - } - dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir, - Name: name, - IsDeleteData: true, - } + return filer_pb.Remove(fs, dir, name, true, false, false, false, []int32{fs.signature}) - glog.V(3).Infof("removing entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - return fmt.Errorf("remove %s: %v", fullFilePath, err) - } - - return nil - }) - return err } func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error { @@ -307,10 +313,10 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) return os.ErrExist } - oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() - newDir, newBaseName := filer2.FullPath(newName).DirAndName() + oldDir, oldBaseName := util.FullPath(oldName).DirAndName() + newDir, newBaseName := util.FullPath(newName).DirAndName() - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: oldDir, @@ -335,23 +341,23 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } + fullpath := util.FullPath(fullFilePath) + var fi FileInfo - entry, err := filer2.GetEntry(ctx, fs, fullFilePath) + entry, err := filer_pb.GetEntry(fs, fullpath) if entry == nil { return nil, os.ErrNotExist } if err != nil { return nil, err } - fi.size = int64(filer2.TotalSize(entry.GetChunks())) - fi.name = fullFilePath + fi.size = int64(filer.FileSize(entry)) + fi.name = string(fullpath) fi.mode = os.FileMode(entry.Attributes.FileMode) fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0) fi.isDirectory = entry.IsDirectory - _, fi.name = path.Split(path.Clean(fi.name)) - if fi.name == "" { - fi.name = "/" + if fi.name == "/" { fi.modifiledTime = time.Now() fi.isDirectory = true } @@ -365,32 +371,21 @@ func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, return fs.stat(ctx, name) } -func (f *WebDavFile) Write(buf []byte) (int, error) { - - glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) - - var err error - ctx := context.Background() - if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) - } - - if f.entry == nil { - return 0, err - } - if err != nil { - return 0, err - } +func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) { var fileId, host string var auth security.EncodedJwt - if err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if flushErr := f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + ctx := context.Background() request := &filer_pb.AssignVolumeRequest{ Count: 1, - Replication: "000", + Replication: f.fs.option.Replication, Collection: f.fs.option.Collection, + DiskType: f.fs.option.DiskType, + Path: name, } resp, err := client.AssignVolume(ctx, request) @@ -398,79 +393,126 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + f.collection, 
f.replication = resp.Collection, resp.Replication return nil - }); err != nil { - return 0, fmt.Errorf("filerGrpcAddress assign volume: %v", err) + }); flushErr != nil { + return nil, f.collection, f.replication, fmt.Errorf("filerGrpcAddress assign volume: %v", flushErr) } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "", nil, auth) - if err != nil { - glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) - return 0, fmt.Errorf("upload data: %v", err) + uploadResult, flushErr, _ := operation.Upload(fileUrl, f.name, f.fs.option.Cipher, reader, false, "", nil, auth) + if flushErr != nil { + glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, flushErr) + return nil, f.collection, f.replication, fmt.Errorf("upload data: %v", flushErr) } if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v to %s: %v", f.name, fileUrl, err) - return 0, fmt.Errorf("upload result: %v", uploadResult.Error) + glog.V(0).Infof("upload failure %v to %s: %v", f.name, fileUrl, flushErr) + return nil, f.collection, f.replication, fmt.Errorf("upload result: %v", uploadResult.Error) + } + return uploadResult.ToPbFileChunk(fileId, offset), f.collection, f.replication, nil +} + +func (f *WebDavFile) Write(buf []byte) (int, error) { + + glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) + + dir, _ := util.FullPath(f.name).DirAndName() + + var getErr error + ctx := context.Background() + if f.entry == nil { + f.entry, getErr = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) } - chunk := &filer_pb.FileChunk{ - FileId: fileId, - Offset: f.off, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + if f.entry == nil { + return 0, getErr + } + if getErr != nil { + return 0, getErr } - f.entry.Chunks = append(f.entry.Chunks, chunk) - dir, _ := filer2.FullPath(f.name).DirAndName() + if f.bufWriter.FlushFunc == nil { + f.bufWriter.FlushFunc = func(data []byte, offset int64) (flushErr error) { - err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - f.entry.Attributes.Mtime = time.Now().Unix() + var chunk *filer_pb.FileChunk + chunk, f.collection, f.replication, flushErr = f.saveDataAsChunk(bytes.NewReader(data), f.name, offset) - request := &filer_pb.UpdateEntryRequest{ - Directory: dir, - Entry: f.entry, + if flushErr != nil { + return fmt.Errorf("%s upload result: %v", f.name, flushErr) + } + + f.entry.Content = nil + f.entry.Chunks = append(f.entry.Chunks, chunk) + + return flushErr } + f.bufWriter.CloseFunc = func() error { + + manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.Chunks) + if manifestErr != nil { + // not good, but should be ok + glog.V(0).Infof("file %s close MaybeManifestize: %v", f.name, manifestErr) + } else { + f.entry.Chunks = manifestedChunks + } - if _, err := client.UpdateEntry(ctx, request); err != nil { - return fmt.Errorf("update %s: %v", f.name, err) + flushErr := f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + f.entry.Attributes.Mtime = time.Now().Unix() + f.entry.Attributes.Collection = f.collection + f.entry.Attributes.Replication = f.replication + + request := &filer_pb.UpdateEntryRequest{ + Directory: dir, + Entry: f.entry, + Signatures: []int32{f.fs.signature}, + } + + if _, err := client.UpdateEntry(ctx, request); err != nil { + return fmt.Errorf("update %s: %v", f.name, err) + } + + return nil + }) + return flushErr } + } - return 
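With this change, `Write` no longer uploads per call: bytes accumulate in a `BufferedWriteCloser`, its `FlushFunc` turns each full buffer into a chunk, and its `CloseFunc` commits the entry exactly once. The contract in miniature, a sketch independent of the SeaweedFS types:

```go
package main

import "fmt"

// bufferedWriter sketches the FlushFunc/CloseFunc contract used above:
// Write accumulates, FlushFunc receives (data, offset) per full buffer,
// CloseFunc runs once after the final flush.
type bufferedWriter struct {
	buf       []byte
	limit     int
	offset    int64
	FlushFunc func(data []byte, offset int64) error
	CloseFunc func() error
}

func (b *bufferedWriter) Write(p []byte) (int, error) {
	b.buf = append(b.buf, p...)
	for len(b.buf) >= b.limit {
		if err := b.FlushFunc(b.buf[:b.limit], b.offset); err != nil {
			return 0, err
		}
		b.offset += int64(b.limit)
		b.buf = b.buf[b.limit:]
	}
	return len(p), nil
}

func (b *bufferedWriter) Close() error {
	if len(b.buf) > 0 {
		if err := b.FlushFunc(b.buf, b.offset); err != nil {
			return err
		}
	}
	return b.CloseFunc()
}

func main() {
	w := &bufferedWriter{limit: 4} // the real buffer above is 4 MiB
	w.FlushFunc = func(data []byte, offset int64) error {
		fmt.Printf("chunk at %d: %q\n", offset, data) // would call saveDataAsChunk
		return nil
	}
	w.CloseFunc = func() error {
		fmt.Println("entry committed") // would call UpdateEntry
		return nil
	}
	w.Write([]byte("hello webdav"))
	w.Close()
}
```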
nil - }) + written, err := f.bufWriter.Write(buf) if err == nil { glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf))) - f.off += int64(len(buf)) + f.off += int64(written) } - return len(buf), err + return written, err } func (f *WebDavFile) Close() error { glog.V(2).Infof("WebDavFileSystem.Close %v", f.name) + err := f.bufWriter.Close() + if f.entry != nil { f.entry = nil f.entryViewCache = nil } - return nil + return err } func (f *WebDavFile) Read(p []byte) (readSize int, err error) { glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) - ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) } if f.entry == nil { return 0, err @@ -478,43 +520,41 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { if err != nil { return 0, err } - if len(f.entry.Chunks) == 0 { + fileSize := int64(filer.FileSize(f.entry)) + if fileSize == 0 { return 0, io.EOF } if f.entryViewCache == nil { - f.entryViewCache = filer2.NonOverlappingVisibleIntervals(f.entry.Chunks) + f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.Chunks) + f.reader = nil } - chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) - - totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, f.name, p, chunkViews, f.off) - if err != nil { - return 0, err + if f.reader == nil { + chunkViews := filer.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt64) + f.reader = filer.NewChunkReaderAtFromClient(filer.LookupFn(f.fs), chunkViews, f.fs.chunkCache, fileSize) } - readSize = int(totalRead) - glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+totalRead) + readSize, err = f.reader.ReadAt(p, f.off) - f.off += totalRead - if readSize == 0 { - return 0, io.EOF + glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) + f.off += int64(readSize) + + if err != nil && err != io.EOF { + glog.Errorf("file read %s: %v", f.name, err) } return + } func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) - ctx := context.Background() - dir := f.name - if dir != "/" && strings.HasSuffix(dir, "/") { - dir = dir[:len(dir)-1] - } + dir, _ := util.FullPath(f.name).DirAndName() - err = filer2.ReadDirAllEntries(ctx, f.fs, dir, "", func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error { fi := FileInfo{ - size: int64(filer2.TotalSize(entry.GetChunks())), + size: int64(filer.FileSize(entry)), name: entry.Name, mode: os.FileMode(entry.Attributes.FileMode), modifiledTime: time.Unix(entry.Attributes.Mtime, 0), @@ -526,6 +566,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { } glog.V(4).Infof("entry: %v", fi.name) ret = append(ret, &fi) + return nil }) old := f.off @@ -556,9 +597,9 @@ func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) { var err error switch whence { - case 0: + case io.SeekStart: f.off = 0 - case 2: + case io.SeekEnd: if fi, err := f.fs.stat(ctx, f.name); err != nil { return 0, err } else { diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go index fbaddcd51..e43f2a093 100644 --- a/weed/shell/command_collection_delete.go +++ b/weed/shell/command_collection_delete.go @@ -2,6 +2,7 @@ package shell 
import ( "context" + "flag" "fmt" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "io" @@ -21,23 +22,40 @@ func (c *commandCollectionDelete) Name() string { func (c *commandCollectionDelete) Help() string { return `delete specified collection - collection.delete <collection_name> + collection.delete -collection <collection_name> -force ` } func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - if len(args) == 0 { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + colDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + collectionName := colDeleteCommand.String("collection", "", "collection to delete. Use '_default_' for the empty-named collection.") + applyBalancing := colDeleteCommand.Bool("force", false, "apply the collection") + if err = colDeleteCommand.Parse(args); err != nil { return nil } - collectionName := args[0] + if *collectionName == "" { + return fmt.Errorf("empty collection name is not allowed") + } + + if *collectionName == "_default_" { + *collectionName = "" + } + + if !*applyBalancing { + fmt.Fprintf(writer, "collection '%s' will be deleted. Use -force to apply the change.\n", *collectionName) + return nil + } - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ - Name: collectionName, + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: *collectionName, }) return err }) @@ -45,7 +63,7 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ return } - fmt.Fprintf(writer, "collection %s is deleted.\n", collectionName) + fmt.Fprintf(writer, "collection %s is deleted.\n", *collectionName) return nil } diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index c4325c66f..2a114e61b 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -41,9 +41,8 @@ func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) { var resp *master_pb.CollectionListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{ + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{ IncludeNormalVolumes: includeNormalVolumes, IncludeEcVolumes: includeEcVolumes, }) diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go index 96599372e..b1ca926d5 100644 --- a/weed/shell/command_ec_balance.go +++ b/weed/shell/command_ec_balance.go @@ -1,9 +1,9 @@ package shell import ( - "context" "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "io" "sort" @@ -29,7 +29,7 @@ func (c *commandEcBalance) Help() string { Algorithm: - For each type of volume server (different max volume count limit){ + func EcBalance() { for each collection: balanceEcVolumes(collectionName) for each rack: @@ -99,6 +99,10 @@ func (c *commandEcBalance) Help() string { func (c *commandEcBalance) Do(args []string, 
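`collection.delete` now follows the shell's dry-run convention: parse flags, describe the change, and only mutate under `-force`; the `_default_` token makes the empty-named collection spellable on a command line. The pattern in miniature, as a standalone sketch:

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	fs := flag.NewFlagSet("collection.delete", flag.ContinueOnError)
	collection := fs.String("collection", "", "collection to delete; '_default_' names the empty collection")
	force := fs.Bool("force", false, "actually apply the deletion")
	if err := fs.Parse(os.Args[1:]); err != nil {
		return
	}

	if *collection == "" {
		fmt.Println("empty collection name is not allowed")
		return
	}
	if *collection == "_default_" {
		*collection = "" // the empty-named collection, made expressible as a flag value
	}

	if !*force {
		// Dry run: describe the change, require an explicit opt-in to mutate.
		fmt.Printf("collection '%s' would be deleted; re-run with -force to apply\n", *collection)
		return
	}
	fmt.Printf("deleting collection '%s'...\n", *collection) // would call CollectionDelete here
}
```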
commandEnv *CommandEnv, writer io.Writer) (err error) { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter") @@ -107,10 +111,8 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W return nil } - ctx := context.Background() - // collect all ec nodes - allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, *dc) + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc) if err != nil { return err } @@ -138,7 +140,7 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W } } - if err := balanceEcRacks(ctx, commandEnv, racks, *applyBalancing); err != nil { + if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil { return fmt.Errorf("balance ec racks: %v", err) } @@ -162,38 +164,36 @@ func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack { func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { - ctx := context.Background() - fmt.Printf("balanceEcVolumes %s\n", collection) - if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil { + if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil { return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err) } - if err := balanceEcShardsAcrossRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) } - if err := balanceEcShardsWithinRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { - return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) + if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + return fmt.Errorf("balance within racks collection %s ec shards: %v", collection, err) } return nil } -func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { +func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { // vid => []ecNode vidLocations := collectVolumeIdToEcNodes(allEcNodes) // deduplicate ec shards for vid, locations := range vidLocations { - if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil { + if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil { return err } } return nil } -func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { +func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { // check whether this volume has ecNodes that are over average shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount) @@ -215,10 +215,10 @@ func 
doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti duplicatedShardIds := []uint32{uint32(shardId)} for _, ecNode := range ecNodes[1:] { - if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { return err } - if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { return err } ecNode.deleteEcVolumeShards(vid, duplicatedShardIds) @@ -227,19 +227,19 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti return nil } -func balanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { +func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { // collect vid => []ecNode, since previous steps can change the locations vidLocations := collectVolumeIdToEcNodes(allEcNodes) // spread the ec shards evenly for vid, locations := range vidLocations { - if err := doBalanceEcShardsAcrossRacks(ctx, commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { + if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { return err } } return nil } -func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { +func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { // calculate average number of shards an ec rack should have for one volume averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks)) @@ -274,7 +274,7 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c for _, n := range racks[rackId].ecNodes { possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) } - err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) if err != nil { return err } @@ -306,7 +306,7 @@ func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]i return "" } -func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { +func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { // collect vid => []ecNode, since previous steps can change the locations vidLocations := collectVolumeIdToEcNodes(allEcNodes) @@ -326,11 +326,13 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all var possibleDestinationEcNodes []*EcNode for _, n := range racks[RackId(rackId)].ecNodes { - possibleDestinationEcNodes = 
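deleteDuplicatedEcShards boils down to grouping shard copies by shard id, keeping the first location, and unmounting plus deleting the rest. A simplified standalone model of that selection, with invented node names:

package main

import "fmt"

func main() {
	// shard id -> nodes currently holding a copy of that shard
	shardToLocations := map[int][]string{
		3: {"nodeA", "nodeB"}, // shard 3 is duplicated
		7: {"nodeC"},          // shard 7 is fine
	}
	for shardId, nodes := range shardToLocations {
		if len(nodes) <= 1 {
			continue
		}
		keep, drop := nodes[0], nodes[1:]
		// the real command unmounts each duplicate, then deletes it from its server
		fmt.Printf("shard %d: keep on %s, remove duplicates from %v\n", shardId, keep, drop)
	}
}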
append(possibleDestinationEcNodes, n) + if _, found := n.info.DiskInfos[string(types.HardDriveType)]; found { + possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) + } } sourceEcNodes := rackEcNodesWithVid[rackId] averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes)) - if err := doBalanceEcShardsWithinOneRack(ctx, commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { + if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { return err } } @@ -338,7 +340,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all return nil } -func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { for _, ecNode := range existingLocations { @@ -353,7 +355,7 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId) - err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) if err != nil { return err } @@ -365,18 +367,18 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, return nil } -func balanceEcRacks(ctx context.Context, commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { +func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { // balance one rack for all ec shards for _, ecRack := range racks { - if err := doBalanceEcRack(ctx, commandEnv, ecRack, applyBalancing); err != nil { + if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil { return err } } return nil } -func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { +func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { if len(ecRack.ecNodes) <= 1 { return nil @@ -387,11 +389,15 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack rackEcNodes = append(rackEcNodes, node) } - ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) { - for _, ecShardInfo := range node.info.EcShardInfos { + ecNodeIdToShardCount := groupByCount(rackEcNodes, func(ecNode *EcNode) (id string, count int) { + diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)] + if !found { + return + } + for _, ecShardInfo := range diskInfo.EcShardInfos { count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount() } - return node.info.Id, count + return ecNode.info.Id, count }) var totalShardCount int @@ -412,26 +418,30 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 
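With the new multi-disk topology, candidate destinations are filtered to nodes that actually report a hard-drive DiskInfo; a node exposing only, say, SSD disks is skipped. A simplified model of that guard, where the struct and the "hdd" label are stand-ins for master_pb.DataNodeInfo and types.HardDriveType:

package main

import "fmt"

type diskInfo struct{ maxVolumeCount int64 }

type node struct {
	id        string
	diskInfos map[string]*diskInfo // keyed by disk type
}

func main() {
	const hardDriveType = "hdd" // stand-in label for types.HardDriveType
	nodes := []node{
		{"n1", map[string]*diskInfo{hardDriveType: {maxVolumeCount: 10}}},
		{"n2", map[string]*diskInfo{"ssd": {maxVolumeCount: 10}}}, // no hdd: not an EC destination
	}
	var possibleDestinations []string
	for _, n := range nodes {
		if _, found := n.diskInfos[hardDriveType]; found {
			possibleDestinations = append(possibleDestinations, n.id)
		}
	}
	fmt.Println("possible destination nodes:", possibleDestinations)
}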
<= averageShardCount { emptyNodeIds := make(map[uint32]bool) - for _, shards := range emptyNode.info.EcShardInfos { - emptyNodeIds[shards.Id] = true + if emptyDiskInfo, found := emptyNode.info.DiskInfos[string(types.HardDriveType)]; found { + for _, shards := range emptyDiskInfo.EcShardInfos { + emptyNodeIds[shards.Id] = true + } } - for _, shards := range fullNode.info.EcShardInfos { - if _, found := emptyNodeIds[shards.Id]; !found { - for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() { - - fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id) - - err := moveMountedShardToEcNode(ctx, commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) - if err != nil { - return err + if fullDiskInfo, found := fullNode.info.DiskInfos[string(types.HardDriveType)]; found { + for _, shards := range fullDiskInfo.EcShardInfos { + if _, found := emptyNodeIds[shards.Id]; !found { + for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() { + + fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id) + + err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) + if err != nil { + return err + } + + ecNodeIdToShardCount[emptyNode.info.Id]++ + ecNodeIdToShardCount[fullNode.info.Id]-- + hasMove = true + break } - - ecNodeIdToShardCount[emptyNode.info.Id]++ - ecNodeIdToShardCount[fullNode.info.Id]-- - hasMove = true break } - break } } } @@ -440,7 +450,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack return nil } -func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes) @@ -458,7 +468,7 @@ func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, a fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id) - err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) + err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) if err != nil { return err } @@ -512,7 +522,11 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[ func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode { vidLocations := make(map[needle.VolumeId][]*EcNode) for _, ecNode := range allEcNodes { - for _, shardInfo := range ecNode.info.EcShardInfos { + diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)] + if !found { + continue + } + for _, shardInfo := range diskInfo.EcShardInfos { vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode) } } diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index 2beed4742..fd35bb14b 100644 --- a/weed/shell/command_ec_common.go +++ 
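The averages that drive both the rack-level and the node-level balancing come from a ceiling division: a volume's 14 shards (10 data plus 4 parity) spread over N racks means at most ceil(14/N) shards per rack. A standalone check of that arithmetic, reusing the ceilDivide helper defined later in command_ec_common.go:

package main

import (
	"fmt"
	"math"
)

func ceilDivide(total, n int) int {
	return int(math.Ceil(float64(total) / float64(n)))
}

func main() {
	const totalShardsCount = 14 // 10 data shards + 4 parity shards per EC volume
	for _, racks := range []int{2, 3, 5} {
		fmt.Printf("%d racks -> at most %d shards of one volume per rack\n",
			racks, ceilDivide(totalShardsCount, racks))
	}
}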
b/weed/shell/command_ec_common.go @@ -3,6 +3,7 @@ package shell import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "math" "sort" @@ -15,26 +16,26 @@ import ( "google.golang.org/grpc" ) -func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { +func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { copiedShardIds := []uint32{uint32(shardId)} if applyBalancing { // ask destination node to copy shard and the ecx file from source node, and mount it - copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) if err != nil { return err } // unmount the to be deleted shards - err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) + err = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) if err != nil { return err } // ask source node to delete the shard, and maybe the ecx file - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) if err != nil { return err } @@ -50,7 +51,7 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist } -func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, +func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer *EcNode, shardIdsToCopy []uint32, volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { @@ -61,7 +62,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption if targetServer.info.Id != existingLocation { fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: shardIdsToCopy, @@ -76,7 +77,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption } fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id) - _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: shardIdsToCopy, @@ -159,8 +160,15 @@ func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (cou return } -func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) { - return 
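moveMountedShardToEcNode, at the top of this file, is a three-step protocol: the destination copies and mounts the shard from the source, the source unmounts its copy, and only then is the source copy deleted, so a failure at any step never leaves the shard unreachable. A stubbed standalone sketch of that ordering, where the three helper functions stand in for the volume-server RPCs:

package main

import "fmt"

func copyAndMount(dst string, shard int) error { fmt.Printf("copy+mount shard %d on %s\n", shard, dst); return nil }
func unmount(src string, shard int) error      { fmt.Printf("unmount shard %d from %s\n", shard, src); return nil }
func deleteShard(src string, shard int) error  { fmt.Printf("delete shard %d from %s\n", shard, src); return nil }

// moveShard mirrors the applyBalancing flag: without -force it only reports.
func moveShard(src, dst string, shard int, apply bool) error {
	if !apply {
		fmt.Printf("would move shard %d: %s => %s\n", shard, src, dst)
		return nil
	}
	if err := copyAndMount(dst, shard); err != nil { // 1. destination gains a live copy
		return err
	}
	if err := unmount(src, shard); err != nil { // 2. source stops serving it
		return err
	}
	return deleteShard(src, shard) // 3. reclaim space on the source
}

func main() {
	_ = moveShard("nodeA", "nodeB", 3, true)
}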
int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos) +func countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (count int) { + if dn.DiskInfos == nil { + return 0 + } + diskInfo := dn.DiskInfos[string(diskType)] + if diskInfo == nil { + return 0 + } + return int(diskInfo.MaxVolumeCount-diskInfo.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos) } type RackId string @@ -173,30 +181,47 @@ type EcNode struct { freeEcSlot int } +func (ecNode *EcNode) localShardIdCount(vid uint32) int { + for _, diskInfo := range ecNode.info.DiskInfos { + for _, ecShardInfo := range diskInfo.EcShardInfos { + if vid == ecShardInfo.Id { + shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + return shardBits.ShardIdCount() + } + } + } + return 0 +} + type EcRack struct { ecNodes map[EcNodeId]*EcNode freeEcSlot int } -func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { +func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { // list all possible locations - var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) - return err - }) + // collect topology information + topologyInfo, _, err := collectTopologyInfo(commandEnv) if err != nil { - return nil, 0, err + return } // find out all volume servers with one slot left. - eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter) + + sortEcNodesByFreeslotsDecending(ecNodes) + + return +} + +func collectEcVolumeServersByDc(topo *master_pb.TopologyInfo, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int) { + eachDataNode(topo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { if selectedDataCenter != "" && selectedDataCenter != dc { return } - freeEcSlots := countFreeShardSlots(dn) + freeEcSlots := countFreeShardSlots(dn, types.HardDriveType) ecNodes = append(ecNodes, &EcNode{ info: dn, dc: dc, @@ -205,19 +230,15 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen }) totalFreeEcSlots += freeEcSlots }) - - sortEcNodesByFreeslotsDecending(ecNodes) - return } -func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { +func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{ + _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: toBeDeletedShardIds, @@ -227,13 +248,12 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt } -func 
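countFreeShardSlots now reads per-disk numbers: unused volume slots on the hard-drive tier are converted into EC shard slots at DataShardsCount (10) shards per slot, minus the shards already stored there. A worked instance of the formula with made-up counts:

package main

import "fmt"

func main() {
	const dataShardsCount = 10 // one volume slot holds roughly 10 shards' worth of data
	maxVolumeCount, activeVolumeCount := 100, 93 // from this disk's DiskInfo
	existingShards := 25 // EC shards already placed on this disk

	freeSlots := (maxVolumeCount-activeVolumeCount)*dataShardsCount - existingShards
	fmt.Println("free ec shard slots:", freeSlots) // (100-93)*10 - 25 = 45
}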
unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { +func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation) return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{ + _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{ VolumeId: uint32(volumeId), ShardIds: toBeUnmountedhardIds, }) @@ -241,13 +261,12 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, }) } -func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { +func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: toBeMountedhardIds, @@ -256,15 +275,21 @@ func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, }) } +func divide(total, n int) float64 { + return float64(total) / float64(n) +} + func ceilDivide(total, n int) int { return int(math.Ceil(float64(total) / float64(n))) } func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits { - for _, shardInfo := range ecNode.info.EcShardInfos { - if needle.VolumeId(shardInfo.Id) == vid { - return erasure_coding.ShardBits(shardInfo.EcIndexBits) + if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found { + for _, shardInfo := range diskInfo.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + return erasure_coding.ShardBits(shardInfo.EcIndexBits) + } } } @@ -274,18 +299,26 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.Shar func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode { foundVolume := false - for _, shardInfo := range ecNode.info.EcShardInfos { - if needle.VolumeId(shardInfo.Id) == vid { - oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) - newShardBits := oldShardBits - for _, shardId := range shardIds { - newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) + diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)] + if found { + for _, shardInfo := range diskInfo.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) + newShardBits := oldShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) + } + shardInfo.EcIndexBits = uint32(newShardBits) + 
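All of the shard bookkeeping here rides on EcIndexBits, a uint32 bitmap with one bit per shard id. A minimal re-implementation of the ShardBits operations the surrounding code relies on (AddShardId, RemoveShardId, ShardIdCount):

package main

import (
	"fmt"
	"math/bits"
)

type shardBits uint32

func (b shardBits) addShardId(id uint32) shardBits    { return b | (1 << id) }
func (b shardBits) removeShardId(id uint32) shardBits { return b &^ (1 << id) }
func (b shardBits) shardIdCount() int                 { return bits.OnesCount32(uint32(b)) }

func main() {
	var b shardBits
	b = b.addShardId(0).addShardId(3).addShardId(13)
	fmt.Printf("bits=%014b count=%d\n", uint32(b), b.shardIdCount()) // three shards present
	b = b.removeShardId(3)
	fmt.Printf("bits=%014b count=%d\n", uint32(b), b.shardIdCount()) // shard 3 dropped
}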
ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() + foundVolume = true + break } - shardInfo.EcIndexBits = uint32(newShardBits) - ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() - foundVolume = true - break } + } else { + diskInfo = &master_pb.DiskInfo{ + Type: string(types.HardDriveType), + } + ecNode.info.DiskInfos[string(types.HardDriveType)] = diskInfo } if !foundVolume { @@ -293,10 +326,11 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, for _, shardId := range shardIds { newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) } - ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{ + diskInfo.EcShardInfos = append(diskInfo.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{ Id: uint32(vid), Collection: collection, EcIndexBits: uint32(newShardBits), + DiskType: string(types.HardDriveType), }) ecNode.freeEcSlot -= len(shardIds) } @@ -306,15 +340,17 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode { - for _, shardInfo := range ecNode.info.EcShardInfos { - if needle.VolumeId(shardInfo.Id) == vid { - oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) - newShardBits := oldShardBits - for _, shardId := range shardIds { - newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId)) + if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found { + for _, shardInfo := range diskInfo.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) + newShardBits := oldShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId)) + } + shardInfo.EcIndexBits = uint32(newShardBits) + ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() } - shardInfo.EcIndexBits = uint32(newShardBits) - ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() } } diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go index 1f9ad2ff9..dafdb041a 100644 --- a/weed/shell/command_ec_decode.go +++ b/weed/shell/command_ec_decode.go @@ -4,6 +4,7 @@ import ( "context" "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "io" "google.golang.org/grpc" @@ -36,6 +37,10 @@ func (c *commandEcDecode) Help() string { func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) volumeId := encodeCommand.Int("volumeId", 0, "the volume id") collection := encodeCommand.String("collection", "", "the collection name") @@ -43,25 +48,24 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // collect topology information - topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + topologyInfo, _, err := collectTopologyInfo(commandEnv) if err != nil { return err } // volumeId is provided if vid != 0 { - return doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid) + return doEcDecode(commandEnv, topologyInfo, *collection, vid) } // apply to all volumes in the collection volumeIds := collectEcShardIds(topologyInfo, 
*collection) fmt.Printf("ec encode volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil { + if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil { return err } } @@ -69,26 +73,26 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } -func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { +func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { // find volume location nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid) fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits) // collect ec shards to the server with most space - targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid) + targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid) if err != nil { return fmt.Errorf("collectEcShards for volume %d: %v", vid, err) } // generate a normal volume - err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation) + err = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation) if err != nil { return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err) } // delete the previous ec shards - err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) + err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) if err != nil { return fmt.Errorf("delete ec shards for volume %d: %v", vid, err) } @@ -96,11 +100,11 @@ func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb return nil } -func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { +func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { // mount volume if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(vid), }) return mountErr @@ -111,7 +115,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO // unmount ec shards for location, ecIndexBits := range nodeToEcIndexBits { fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) - err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) + err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) if err != nil { return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err) } @@ -119,7 +123,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO // delete ec shards for location, ecIndexBits := range nodeToEcIndexBits { 
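doEcDecode above gives the overall shape of ec.decode, which undoes ec.encode: pick the server already holding the most shards, gather the missing ones there, rebuild the normal volume, then mount it and drop the scattered shards. A stubbed outline of that order of operations, with invented node names:

package main

import "fmt"

func main() {
	nodeToShardCount := map[string]int{"nodeA": 9, "nodeB": 5}

	// 1. the target is the location that already holds the most shards
	target, best := "", -1
	for node, count := range nodeToShardCount {
		if count > best {
			target, best = node, count
		}
	}
	fmt.Println("1. copy the remaining shards to", target)
	fmt.Println("2. VolumeEcShardsToVolume on", target, "(rebuild the normal volume)")
	fmt.Println("3. VolumeMount the rebuilt volume on", target)
	fmt.Println("4. unmount + delete the ec shards on every node")
}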
fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) - err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) + err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) if err != nil { return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err) } @@ -128,12 +132,12 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO return nil } -func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { +func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{ + _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{ VolumeId: uint32(vid), Collection: collection, }) @@ -144,7 +148,7 @@ func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, v } -func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { +func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { maxShardCount := 0 var exisitngEcIndexBits erasure_coding.ShardBits @@ -174,7 +178,7 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(vid), Collection: collection, ShardIds: needToCopyEcIndexBits.ToUint32Slice(), @@ -204,27 +208,29 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB } -func collectTopologyInfo(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) { +func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) { var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { return } - return resp.TopologyInfo, nil + return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil } func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) { eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { - for _, v 
:= range dn.EcShardInfos { - if v.Collection == selectedCollection && v.Id == uint32(vid) { - ecShardInfos = append(ecShardInfos, v) + if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found { + for _, v := range diskInfo.EcShardInfos { + if v.Collection == selectedCollection && v.Id == uint32(vid) { + ecShardInfos = append(ecShardInfos, v) + } } } }) @@ -236,9 +242,11 @@ func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection stri vidMap := make(map[uint32]bool) eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { - for _, v := range dn.EcShardInfos { - if v.Collection == selectedCollection { - vidMap[v.Id] = true + if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found { + for _, v := range diskInfo.EcShardInfos { + if v.Collection == selectedCollection { + vidMap[v.Id] = true + } } } }) @@ -254,9 +262,11 @@ func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeI nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits) eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { - for _, v := range dn.EcShardInfos { - if v.Id == uint32(vid) { - nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits) + if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found { + for _, v := range diskInfo.EcShardInfos { + if v.Id == uint32(vid) { + nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits) + } } } }) diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index 58527abf2..634cb11e2 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -54,6 +54,10 @@ func (c *commandEcEncode) Help() string { func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) volumeId := encodeCommand.Int("volumeId", 0, "the volume id") collection := encodeCommand.String("collection", "", "the collection name") @@ -63,22 +67,21 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // volumeId is provided if vid != 0 { - return doEcEncode(ctx, commandEnv, *collection, vid) + return doEcEncode(commandEnv, *collection, vid) } // apply to all volumes in the collection - volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod) + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) if err != nil { return err } fmt.Printf("ec encode volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doEcEncode(ctx, commandEnv, *collection, vid); err != nil { + if err = doEcEncode(commandEnv, *collection, vid); err != nil { return err } } @@ -86,7 +89,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } -func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { +func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { @@ -96,19 +99,19 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, // fmt.Printf("found ec %d shards on %v\n", vid, locations) // mark the 
volume as readonly - err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, vid, locations) if err != nil { return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // generate ec shards - err = generateEcShards(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) + err = generateEcShards(commandEnv.option.GrpcDialOption, vid, collection, locations[0].Url) if err != nil { return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) } // balance the ec shards to current cluster - err = spreadEcShards(ctx, commandEnv, vid, collection, locations) + err = spreadEcShards(commandEnv, vid, collection, locations) if err != nil { return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err) } @@ -116,12 +119,14 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, return nil } -func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { +func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { for _, location := range locations { + fmt.Printf("markVolumeReadonly %d on %s ...\n", volumeId, location.Url) + err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{ + _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{ VolumeId: uint32(volumeId), }) return markErr @@ -136,10 +141,12 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol return nil } -func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { +func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { + + fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer) err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{ + _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -150,9 +157,9 @@ func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volum } -func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { +func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { - allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "") + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "") if err != nil { return err } @@ -169,26 +176,27 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle allocatedEcIds := balancedEcDistribution(allocatedDataNodes) // ask the data nodes to copy from 
the source volume server - copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) + copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) if err != nil { return err } // unmount the to be deleted shards - err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) + err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) if err != nil { return err } // ask the source volume server to clean up copied ec shards - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) if err != nil { return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err) } // ask the source volume server to delete the original volume for _, location := range existingLocations { - err = deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, location.Url) + fmt.Printf("delete volume %d from %s\n", volumeId, location.Url) + err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url) if err != nil { return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err) } @@ -198,9 +206,9 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } -func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServers []*EcNode, allocatedEcIds [][]uint32, - volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { +func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { + + fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url) // parallelize shardIdChan := make(chan []uint32, len(targetServers)) @@ -213,7 +221,7 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia wg.Add(1) go func(server *EcNode, allocatedEcShardIds []uint32) { defer wg.Done() - copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server, + copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server, allocatedEcShardIds, volumeId, collection, existingLocation.Url) if copyErr != nil { err = copyErr @@ -255,13 +263,10 @@ func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) { return allocated } -func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { +func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { - var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) - return err - }) + // collect 
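parallelCopyEcShardsFromSource hands each target server its allocated shard ids and runs the copies concurrently, collecting results once every goroutine is done. A self-contained sketch of that fan-out, where copyShards stands in for the copy-and-mount RPC:

package main

import (
	"fmt"
	"sync"
)

func copyShards(server string, shardIds []uint32) ([]uint32, error) {
	fmt.Printf("copying %v to %s\n", shardIds, server)
	return shardIds, nil
}

func main() {
	targets := map[string][]uint32{"n1": {0, 1}, "n2": {2, 3}, "n3": {4}}

	copied := make(chan []uint32, len(targets)) // buffered: one send per worker
	var wg sync.WaitGroup
	for server, shardIds := range targets {
		wg.Add(1)
		go func(server string, shardIds []uint32) {
			defer wg.Done()
			if ids, err := copyShards(server, shardIds); err == nil {
				copied <- ids
			}
		}(server, shardIds)
	}
	wg.Wait()
	close(copied)

	var actuallyCopied []uint32
	for ids := range copied {
		actuallyCopied = append(actuallyCopied, ids...)
	}
	fmt.Println("copied shard ids:", actuallyCopied)
}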
topology information + topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv) if err != nil { return } @@ -269,14 +274,16 @@ func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, se quietSeconds := int64(quietPeriod / time.Second) nowUnixSeconds := time.Now().Unix() - fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds) + fmt.Printf("collect volumes quiet for: %d seconds\n", quietSeconds) vidMap := make(map[uint32]bool) - eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { - for _, v := range dn.VolumeInfos { - if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds { - if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 { - vidMap[v.Id] = true + eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, diskInfo := range dn.DiskInfos { + for _, v := range diskInfo.VolumeInfos { + if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds { + if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 { + vidMap[v.Id] = true + } } } } diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index 2e2fca743..8d5d7bb91 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -56,6 +56,10 @@ func (c *commandEcRebuild) Help() string { func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") applyChanges := fixCommand.Bool("force", false, "apply the changes") @@ -64,7 +68,7 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W } // collect all ec nodes - allEcNodes, _, err := collectEcNodes(context.Background(), commandEnv, "") + allEcNodes, _, err := collectEcNodes(commandEnv, "") if err != nil { return err } @@ -92,8 +96,6 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error { - ctx := context.Background() - fmt.Printf("rebuildEcVolumes %s\n", collection) // collect vid => each shard locations, similar to ecShardMap in topology.go @@ -117,7 +119,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return fmt.Errorf("disk space is not enough") } - if err := rebuildOneEcVolume(ctx, commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { + if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { return err } } @@ -125,13 +127,13 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return nil } -func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { +func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId) // collect shard 
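A volume qualifies for encoding only if it has been write-quiet for the whole quiet period and is over the fullness threshold, which is now computed against the volumeSizeLimitMb returned by collectTopologyInfo. The same predicate, checked standalone with invented numbers:

package main

import (
	"fmt"
	"time"
)

func main() {
	const volumeSizeLimitMb = 1024 // reported by the master
	fullPercentage := 95.0
	quietPeriod := 10 * time.Minute

	nowUnixSeconds := time.Now().Unix()
	quietSeconds := int64(quietPeriod / time.Second)

	volumeSize := uint64(1000) * 1024 * 1024  // ~1000 MB of data in this volume
	modifiedAtSecond := nowUnixSeconds - 3600 // last write was an hour ago

	quiet := modifiedAtSecond+quietSeconds < nowUnixSeconds
	full := float64(volumeSize) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024
	fmt.Printf("quiet=%v full=%v -> encode candidate=%v\n", quiet, full, quiet && full)
}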
files to rebuilder local disk var generatedShardIds []uint32 - copiedShardIds, _, err := prepareDataToRecover(ctx, commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) + copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) if err != nil { return err } @@ -139,7 +141,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * // clean up working files // ask the rebuilder to delete the copied shards - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) if err != nil { fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds) } @@ -151,13 +153,13 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * } // generate ec shards, and maybe ecx file - generatedShardIds, err = generateMissingShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) + generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) if err != nil { return err } // mount the generated shards - err = mountEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) + err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) if err != nil { return err } @@ -167,11 +169,10 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * return nil } -func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { +func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{ + resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -183,14 +184,16 @@ func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, return } -func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { +func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { needEcxFile := true var localShardBits erasure_coding.ShardBits - for _, ecShardInfo := range rebuilder.info.EcShardInfos { - if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId { - needEcxFile = false - localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + for _, diskInfo := range rebuilder.info.DiskInfos { + for _, 
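ec.rebuild can regenerate missing shards only while at least DataShardsCount of the 14 shards still survive somewhere; with fewer than 10 left, the Reed-Solomon code cannot recover the volume. A standalone check of that property, assuming SeaweedFS's 10+4 scheme:

package main

import "fmt"

func main() {
	const dataShards, totalShards = 10, 14
	for _, remaining := range []int{14, 11, 10, 9} {
		fmt.Printf("%2d/%d shards left -> rebuildable: %v\n",
			remaining, totalShards, remaining >= dataShards)
	}
}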
ecShardInfo := range diskInfo.EcShardInfos { + if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId { + needEcxFile = false + localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + } } } @@ -210,7 +213,7 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder var copyErr error if applyBalancing { copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: []uint32{uint32(shardId)}, @@ -246,15 +249,17 @@ type EcShardMap map[needle.VolumeId]EcShardLocations type EcShardLocations [][]*EcNode func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) { - for _, shardInfo := range ecNode.info.EcShardInfos { - if shardInfo.Collection == collection { - existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)] - if !found { - existing = make([][]*EcNode, erasure_coding.TotalShardsCount) - ecShardMap[needle.VolumeId(shardInfo.Id)] = existing - } - for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() { - existing[shardId] = append(existing[shardId], ecNode) + for _, diskInfo := range ecNode.info.DiskInfos { + for _, shardInfo := range diskInfo.EcShardInfos { + if shardInfo.Collection == collection { + existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)] + if !found { + existing = make([][]*EcNode, erasure_coding.TotalShardsCount) + ecShardMap[needle.VolumeId(shardInfo.Id)] = existing + } + for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() { + existing[shardId] = append(existing[shardId], ecNode) + } } } } diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go index c233d25d0..a1226adbb 100644 --- a/weed/shell/command_ec_test.go +++ b/weed/shell/command_ec_test.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "testing" @@ -121,13 +120,14 @@ func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) { racks := collectRacks(allEcNodes) balanceEcVolumes(nil, "c1", allEcNodes, racks, false) - balanceEcRacks(context.Background(), nil, racks, false) + balanceEcRacks(nil, racks, false) } func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode { return &EcNode{ info: &master_pb.DataNodeInfo{ - Id: dataNodeId, + Id: dataNodeId, + DiskInfos: make(map[string]*master_pb.DiskInfo), }, dc: dc, rack: RackId(rack), diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 9db36e9d1..df43d93dc 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -1,13 +1,13 @@ package shell import ( - "context" "fmt" "io" "math" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -25,39 +25,34 @@ func (c *commandFsCat) Help() string { return `stream the file content on to the screen fs.cat /dir/file_name - fs.cat http://<filer_server>:<port>/dir/file_name ` } func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - input := findInputDirectory(args) - - filerServer, filerPort, path, 
err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(path) { return fmt.Errorf("%s is a directory", path) } - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + respLookupEntry, err := filer_pb.LookupEntry(client, request) if err != nil { return err } - return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32) + return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) }) diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go index 408ec86c8..2cc28f7a2 100644 --- a/weed/shell/command_fs_cd.go +++ b/weed/shell/command_fs_cd.go @@ -1,7 +1,6 @@ package shell import ( - "context" "io" ) @@ -17,41 +16,33 @@ func (c *commandFsCd) Name() string { } func (c *commandFsCd) Help() string { - return `change directory to http://<filer_server>:<port>/dir/ + return `change directory to a directory /path/to/dir The full path can be too long to type. For example, - fs.ls http://<filer_server>:<port>/some/path/to/file_name + fs.ls /some/path/to/file_name can be simplified as - fs.cd http://<filer_server>:<port>/some/path + fs.cd /some/path fs.ls to/file_name ` } func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } if path == "/" { - commandEnv.option.FilerHost = filerServer - commandEnv.option.FilerPort = filerPort commandEnv.option.Directory = "/" return nil } - ctx := context.Background() - - err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path) + err = commandEnv.checkDirectory(path) if err == nil { - commandEnv.option.FilerHost = filerServer - commandEnv.option.FilerPort = filerPort commandEnv.option.Directory = path } diff --git a/weed/shell/command_fs_configure.go b/weed/shell/command_fs_configure.go new file mode 100644 index 000000000..02cd7ac69 --- /dev/null +++ b/weed/shell/command_fs_configure.go @@ -0,0 +1,129 @@ +package shell + +import ( + "bytes" + "flag" + "fmt" + "io" + "strings" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +func init() { + Commands = append(Commands, &commandFsConfigure{}) +} + +type commandFsConfigure struct { +} + +func (c *commandFsConfigure) Name() string { + return "fs.configure" +} + +func (c *commandFsConfigure) Help() string { + return `configure and apply storage options for each location + + # see the current configuration file content + fs.configure + + # try the changes and see the possible configuration file content + fs.configure -locationPrefix=/my/folder -collection=abc + fs.configure -locationPrefix=/my/folder -collection=abc -ttl=7d + + # example: configure adding only 1 physical volume for 
each bucket collection + fs.configure -locationPrefix=/buckets/ -volumeGrowthCount=1 + + # apply the changes + fs.configure -locationPrefix=/my/folder -collection=abc -apply + + # delete the changes + fs.configure -locationPrefix=/my/folder -delete -apply + +` +} + +func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + fsConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + locationPrefix := fsConfigureCommand.String("locationPrefix", "", "path prefix, required to update the path-specific configuration") + collection := fsConfigureCommand.String("collection", "", "assign writes to this collection") + replication := fsConfigureCommand.String("replication", "", "assign writes with this replication") + ttl := fsConfigureCommand.String("ttl", "", "assign writes with this ttl") + diskType := fsConfigureCommand.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") + fsync := fsConfigureCommand.Bool("fsync", false, "fsync for the writes") + volumeGrowthCount := fsConfigureCommand.Int("volumeGrowthCount", 0, "the number of physical volumes to add if no writable volumes") + isDelete := fsConfigureCommand.Bool("delete", false, "delete the configuration by locationPrefix") + apply := fsConfigureCommand.Bool("apply", false, "update and apply filer configuration") + if err = fsConfigureCommand.Parse(args); err != nil { + return nil + } + + var buf bytes.Buffer + if err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return filer.ReadEntry(commandEnv.MasterClient, client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, &buf) + }); err != nil && err != filer_pb.ErrNotFound { + return err + } + + fc := filer.NewFilerConf() + if buf.Len() > 0 { + if err = fc.LoadFromBytes(buf.Bytes()); err != nil { + return err + } + } + + if *locationPrefix != "" { + locConf := &filer_pb.FilerConf_PathConf{ + LocationPrefix: *locationPrefix, + Collection: *collection, + Replication: *replication, + Ttl: *ttl, + Fsync: *fsync, + DiskType: *diskType, + VolumeGrowthCount: uint32(*volumeGrowthCount), + } + + // check collection + if *collection != "" && strings.HasPrefix(*locationPrefix, "/buckets/") { + return fmt.Errorf("one s3 bucket goes to one collection and is not customizable") + } + + // check replication + if *replication != "" { + rp, err := super_block.NewReplicaPlacementFromString(*replication) + if err != nil { + return fmt.Errorf("parse replication %s: %v", *replication, err) + } + if *volumeGrowthCount%rp.GetCopyCount() != 0 { + return fmt.Errorf("volumeGrowthCount %d should be divisible by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount()) + } + } + + // save it + if *isDelete { + fc.DeleteLocationConf(*locationPrefix) + } else { + fc.AddLocationConf(locConf) + } + } + + buf.Reset() + fc.ToText(&buf) + + fmt.Fprintf(writer, string(buf.Bytes())) + fmt.Fprintln(writer) + + if *apply { + + if err := filer.SaveAs(commandEnv.option.FilerHost, int(commandEnv.option.FilerPort), filer.DirectoryEtcSeaweedFS, filer.FilerConfName, "text/plain; charset=utf-8", &buf); err != nil { + return err + } + + } + + return nil + +} diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 1d7d79686..71003714d 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -1,13 +1,10 @@ package shell import ( - "context" "fmt" "io" - "google.golang.org/grpc" - - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" 
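The volumeGrowthCount validation above exists because volumes are always grown in whole replica sets: with replication "010" (one extra copy on a different rack) each logical volume costs two physical volumes, so the growth count must be a multiple of the copy count. A standalone instance of that check:

package main

import "fmt"

func main() {
	// copy count = 1 + the replica digits; replication "010" means 2 copies
	copyCount := 2
	for _, volumeGrowthCount := range []int{1, 2, 4} {
		if volumeGrowthCount%copyCount != 0 {
			fmt.Printf("volumeGrowthCount %d rejected: not divisible by copy count %d\n", volumeGrowthCount, copyCount)
		} else {
			fmt.Printf("volumeGrowthCount %d accepted\n", volumeGrowthCount)
		}
	}
}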
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -26,28 +23,26 @@ func (c *commandFsDu) Name() string { func (c *commandFsDu) Help() string { return `show disk usage - fs.du http://<filer_server>:<port>/dir - fs.du http://<filer_server>:<port>/dir/file_name - fs.du http://<filer_server>:<port>/dir/file_prefix + fs.du /dir + fs.du /dir/file_name + fs.du /dir/file_prefix ` } func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(path) { path = path + "/" } var blockCount, byteCount uint64 - dir, name := filer2.FullPath(path).DirAndName() - blockCount, byteCount, err = duTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) + dir, name := util.FullPath(path).DirAndName() + blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv, dir, name) if name == "" && err == nil { fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) @@ -57,54 +52,33 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer } -func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) { +func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) { + + err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error { + + var fileBlockCount, fileByteCount uint64 - err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { if entry.IsDirectory { subDir := fmt.Sprintf("%s/%s", dir, entry.Name) if dir == "/" { subDir = "/" + entry.Name } - numBlock, numByte, err := duTraverseDirectory(ctx, writer, filerClient, subDir, "") + numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "") if err == nil { blockCount += numBlock byteCount += numByte } } else { - blockCount += uint64(len(entry.Chunks)) - byteCount += filer2.TotalSize(entry.Chunks) + fileBlockCount = uint64(len(entry.Chunks)) + fileByteCount = filer.FileSize(entry) + blockCount += fileBlockCount + byteCount += fileByteCount } if name != "" && !entry.IsDirectory { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", fileBlockCount, fileByteCount, dir, entry.Name) } + return nil }) return } - -func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { - - filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, filerGrpcAddress, env.option.GrpcDialOption) - -} - -type commandFilerClient struct { - env *CommandEnv - filerServer string - filerPort int64 -} - -func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *commandFilerClient { - return &commandFilerClient{ - env: env, - filerServer: filerServer, - filerPort: 
filerPort, - } -} -func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn) -} diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go new file mode 100644 index 000000000..33458bb6f --- /dev/null +++ b/weed/shell/command_fs_lock_unlock.go @@ -0,0 +1,55 @@ +package shell + +import ( + "github.com/chrislusf/seaweedfs/weed/util" + "io" +) + +func init() { + Commands = append(Commands, &commandUnlock{}) + Commands = append(Commands, &commandLock{}) +} + +// =========== Lock ============== +type commandLock struct { +} + +func (c *commandLock) Name() string { + return "lock" +} + +func (c *commandLock) Help() string { + return `lock in order to exclusively manage the cluster + + This is a blocking operation if there is alread another lock. +` +} + +func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + commandEnv.locker.RequestLock(util.DetectedHostAddress()) + + return nil +} + +// =========== Unlock ============== + +type commandUnlock struct { +} + +func (c *commandUnlock) Name() string { + return "unlock" +} + +func (c *commandUnlock) Help() string { + return `unlock the cluster-wide lock + +` +} + +func (c *commandUnlock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + commandEnv.locker.ReleaseLock() + + return nil +} diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 01842083b..592ec8be0 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "os" @@ -9,8 +8,9 @@ import ( "strconv" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -30,9 +30,6 @@ func (c *commandFsLs) Help() string { fs.ls [-l] [-a] /dir/ fs.ls [-l] [-a] /dir/file_name fs.ls [-l] [-a] /dir/file_prefix - fs.ls [-l] [-a] http://<filer_server>:<port>/dir/ - fs.ls [-l] [-a] http://<filer_server>:<port>/dir/file_name - fs.ls [-l] [-a] http://<filer_server>:<port>/dir/file_prefix ` } @@ -53,26 +50,22 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer } } - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(path) { path = path + "/" } - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() entryCount := 0 - err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), dir, name, func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(commandEnv, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error { if !showHidden && strings.HasPrefix(entry.Name, ".") { - return + return nil } entryCount++ @@ -95,18 +88,19 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer } } - if dir == "/" { + if strings.HasSuffix(dir, "/") { // just for printing - dir = "" + dir = dir[:len(dir)-1] } fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", fileMode, len(entry.Chunks), userName, groupName, - 
filer2.TotalSize(entry.Chunks), dir, entry.Name) + filer.FileSize(entry), dir, entry.Name) } else { fmt.Fprintf(writer, "%s\n", entry.Name) } + return nil }) if isLongFormat && err == nil { diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index 5908b0a3c..e0525defa 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -1,14 +1,15 @@ package shell import ( - "context" "fmt" + "github.com/golang/protobuf/proto" "io" + "sort" "github.com/golang/protobuf/jsonpb" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -27,31 +28,25 @@ func (c *commandFsMetaCat) Help() string { fs.meta.cat /dir/ fs.meta.cat /dir/file_name - fs.meta.cat http://<filer_server>:<port>/dir/ - fs.meta.cat http://<filer_server>:<port>/dir/file_name ` } func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - ctx := context.Background() - - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + respLookupEntry, err := filer_pb.LookupEntry(client, request) if err != nil { return err } @@ -61,6 +56,13 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W Indent: " ", } + sort.Slice(respLookupEntry.Entry.Chunks, func(i, j int) bool { + if respLookupEntry.Entry.Chunks[i].Offset == respLookupEntry.Entry.Chunks[j].Offset { + return respLookupEntry.Entry.Chunks[i].Mtime < respLookupEntry.Entry.Chunks[j].Mtime + } + return respLookupEntry.Entry.Chunks[i].Offset < respLookupEntry.Entry.Chunks[j].Offset + }) + text, marshalErr := m.MarshalToString(respLookupEntry.Entry) if marshalErr != nil { return fmt.Errorf("marshal meta: %v", marshalErr) @@ -68,6 +70,12 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W fmt.Fprintf(writer, "%s\n", text) + bytes, _ := proto.Marshal(respLookupEntry.Entry) + gzippedBytes, _ := util.GzipData(bytes) + // zstdBytes, _ := util.ZstdData(bytes) + // fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes), len(zstdBytes)) + fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes)) + return nil }) diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 5ea8de9f5..46dc07e9a 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -1,15 +1,15 @@ package shell import ( - "context" "fmt" "io" "os" + "strings" + + "github.com/golang/protobuf/proto" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func init() { @@ -38,11 +38,6 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. 
 		return nil
 	}
 
-	filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(nil))
-	if err != nil {
-		return err
-	}
-
 	fileName := args[len(args)-1]
 
 	dst, err := os.OpenFile(fileName, os.O_RDONLY, 0644)
@@ -53,9 +48,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	var dirCount, fileCount uint64
 
-	ctx := context.Background()
-
-	err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+	err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		sizeBuf := make([]byte, 4)
 
@@ -80,14 +73,15 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
 				return err
 			}
 
-			if _, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{
+			fullEntry.Entry.Name = strings.ReplaceAll(fullEntry.Entry.Name, "/", "x")
+			if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
 				Directory: fullEntry.Dir,
 				Entry:     fullEntry.Entry,
 			}); err != nil {
 				return err
 			}
 
-			fmt.Fprintf(writer, "load %s\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
+			fmt.Fprintf(writer, "load %s\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
 
 			if fullEntry.Entry.IsDirectory {
 				dirCount++
@@ -101,7 +95,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	if err == nil {
 		fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount)
-		fmt.Fprintf(writer, "\n%s is loaded to http://%s:%d%s\n", fileName, filerServer, filerPort, path)
+		fmt.Fprintf(writer, "\n%s is loaded.\n", fileName)
 	}
 
 	return err
diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go
index a898df7a0..4342fa81d 100644
--- a/weed/shell/command_fs_meta_notify.go
+++ b/weed/shell/command_fs_meta_notify.go
@@ -1,13 +1,9 @@
 package shell
 
 import (
-	"context"
 	"fmt"
 	"io"
 
-	"github.com/spf13/viper"
-
-	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/notification"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -36,20 +32,18 @@ func (c *commandFsMetaNotify) Help() string {
 
 func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-	filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+	path, err := commandEnv.parseUrl(findInputDirectory(args))
 	if err != nil {
 		return err
 	}
 
 	util.LoadConfiguration("notification", true)
-	v := viper.GetViper()
-	notification.LoadConfiguration(v.Sub("notification"))
-
-	ctx := context.Background()
+	v := util.GetViper()
+	notification.LoadConfiguration(v, "notification.")
 
 	var dirCount, fileCount uint64
 
-	err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+	err = filer_pb.TraverseBfs(commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
 
 		if entry.IsDirectory {
 			dirCount++
diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go
index ed070350f..37d94fe42 100644
--- a/weed/shell/command_fs_meta_save.go
+++ b/weed/shell/command_fs_meta_save.go
@@ -1,18 +1,18 @@
 package shell
 
 import (
-	"context"
 	"flag"
 	"fmt"
 	"io"
 	"os"
+	"path/filepath"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
 
 	"github.com/golang/protobuf/proto"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -40,8 +40,6 @@ func (c *commandFsMetaSave) Help() string {
 	The meta data will be saved into a local <filer_host>-<port>-<time>.meta file.
 	These meta data can be later loaded by the fs.meta.load command.
 
-	This assumes there are no deletions, so this is different from taking a snapshot.
-
 `
 }
 
@@ -50,22 +48,22 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed file")
 	outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file")
+	isObfuscate := fsMetaSaveCommand.Bool("obfuscate", false, "obfuscate the file names")
+	// chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file")
 	if err = fsMetaSaveCommand.Parse(args); err != nil {
 		return nil
 	}
 
-	filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
+	path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
 	if parseErr != nil {
 		return parseErr
 	}
 
-	ctx := context.Background()
-
-	t := time.Now()
 	fileName := *outputFileName
 	if fileName == "" {
+		t := time.Now()
 		fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta",
-			filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
+			commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
 	}
 
 	dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
@@ -74,43 +72,76 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 	}
 	defer dst.Close()
 
-	var wg sync.WaitGroup
-	wg.Add(1)
-	outputChan := make(chan []byte, 1024)
-	go func() {
+	var cipherKey util.CipherKey
+	if *isObfuscate {
+		cipherKey = util.GenCipherKey()
+	}
+
+	err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan interface{}) {
 		sizeBuf := make([]byte, 4)
-		for b := range outputChan {
+		for item := range outputChan {
+			b := item.([]byte)
 			util.Uint32toBytes(sizeBuf, uint32(len(b)))
 			dst.Write(sizeBuf)
 			dst.Write(b)
 		}
+	}, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+		if !entry.Entry.IsDirectory {
+			ext := filepath.Ext(entry.Entry.Name)
+			if encrypted, encErr := util.Encrypt([]byte(entry.Entry.Name), cipherKey); encErr == nil {
+				entry.Entry.Name = util.Base64Encode(encrypted)[:len(entry.Entry.Name)] + ext
+				entry.Entry.Name = strings.ReplaceAll(entry.Entry.Name, "/", "x")
+			}
+		}
+		bytes, err := proto.Marshal(entry)
+		if err != nil {
+			fmt.Fprintf(writer, "marshal error: %v\n", err)
+			return
+		}
+
+		outputChan <- bytes
+		return nil
+	})
+
+	if err == nil {
+		fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", commandEnv.option.FilerHost, commandEnv.option.FilerPort, path, fileName)
+	}
+
+	return err
+
+}
+
+func doTraverseBfsAndSaving(filerClient filer_pb.FilerClient, writer io.Writer, path string, verbose bool, saveFn func(outputChan chan interface{}), genFn func(entry *filer_pb.FullEntry, outputChan chan interface{}) error) error {
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	outputChan := make(chan interface{}, 1024)
+	go func() {
+		saveFn(outputChan)
 		wg.Done()
 	}()
 
 	var dirCount, fileCount uint64
 
-	err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+	err := filer_pb.TraverseBfs(filerClient, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
 
 		protoMessage := &filer_pb.FullEntry{
 			Dir:   string(parentPath),
 			Entry: entry,
 		}
 
-		bytes, err := proto.Marshal(protoMessage)
-		if err != nil {
+		if err := genFn(protoMessage, outputChan); err != nil {
 			fmt.Fprintf(writer, "marshal error: %v\n", err)
 			return
 		}
 
-		outputChan <- bytes
-
 		if entry.IsDirectory {
 			atomic.AddUint64(&dirCount, 1)
 		} else {
 			atomic.AddUint64(&fileCount, 1)
 		}
 
-		if *verbose {
+		if verbose {
 			println(parentPath.Child(entry.Name))
 		}
 
@@ -120,66 +151,8 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	wg.Wait()
 
-	if err == nil {
+	if err == nil && writer != nil {
 		fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount)
-		fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName)
 	}
 
 	return err
-
-}
-func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
-	parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
-
-	K := 5
-
-	var jobQueueWg sync.WaitGroup
-	queue := util.NewQueue()
-	jobQueueWg.Add(1)
-	queue.Enqueue(parentPath)
-	var isTerminating bool
-
-	for i := 0; i < K; i++ {
-		go func() {
-			for {
-				if isTerminating {
-					break
-				}
-				t := queue.Dequeue()
-				if t == nil {
-					time.Sleep(329 * time.Millisecond)
-					continue
-				}
-				dir := t.(filer2.FullPath)
-				processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn)
-				if processErr != nil {
-					err = processErr
-				}
-				jobQueueWg.Done()
-			}
-		}()
-	}
-	jobQueueWg.Wait()
-	isTerminating = true
-	return
-}
-
-func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
-	parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup,
-	fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
-
-	return filer2.ReadDirAllEntries(ctx, filerClient, string(parentPath), "", func(entry *filer_pb.Entry, isLast bool) {
-
-		fn(parentPath, entry)
-
-		if entry.IsDirectory {
-			subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
-			if parentPath == "/" {
-				subDir = "/" + entry.Name
-			}
-			jobQueueWg.Add(1)
-			queue.Enqueue(filer2.FullPath(subDir))
-		}
-	})
-
 }
diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go
index 67606ab53..2448c8f61 100644
--- a/weed/shell/command_fs_mv.go
+++ b/weed/shell/command_fs_mv.go
@@ -4,10 +4,9 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"path/filepath"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func init() {
@@ -37,37 +36,39 @@ func (c *commandFsMv) Help() string {
 
 func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-	filerServer, filerPort, sourcePath, err := commandEnv.parseUrl(args[0])
+	if len(args) != 2 {
+		return fmt.Errorf("need to have 2 arguments")
+	}
+
+	sourcePath, err := commandEnv.parseUrl(args[0])
 	if err != nil {
 		return err
 	}
 
-	_, _, destinationPath, err := commandEnv.parseUrl(args[1])
+	destinationPath, err := commandEnv.parseUrl(args[1])
 	if err != nil {
 		return err
 	}
 
-	ctx := context.Background()
-
-	sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName()
+	sourceDir, sourceName := util.FullPath(sourcePath).DirAndName()
 
-	destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName()
+	destinationDir, destinationName := util.FullPath(destinationPath).DirAndName()
 
-	return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error {
+	return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		// collect destination entry info
 		destinationRequest := &filer_pb.LookupDirectoryEntryRequest{
 			Name:      destinationDir,
 			Directory: destinationName,
 		}
-		respDestinationLookupEntry, err := client.LookupDirectoryEntry(ctx, destinationRequest)
+		respDestinationLookupEntry, err := filer_pb.LookupEntry(client, destinationRequest)
 
 		var targetDir, targetName string
 
 		// moving a file or folder
 		if err == nil && respDestinationLookupEntry.Entry.IsDirectory {
 			// to a directory
-			targetDir = filepath.ToSlash(filepath.Join(destinationDir, destinationName))
+			targetDir = util.Join(destinationDir, destinationName)
 			targetName = sourceName
 		} else {
 			// to a file or folder
@@ -82,9 +83,9 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer
 			NewName:      targetName,
 		}
 
-		_, err = client.AtomicRenameEntry(ctx, request)
+		_, err = client.AtomicRenameEntry(context.Background(), request)
 
-		fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, filer2.NewFullPath(targetDir, targetName))
+		fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, util.NewFullPath(targetDir, targetName))
 
 		return err
diff --git a/weed/shell/command_fs_pwd.go b/weed/shell/command_fs_pwd.go
index 084a5e90a..d7d9819c8 100644
--- a/weed/shell/command_fs_pwd.go
+++ b/weed/shell/command_fs_pwd.go
@@ -22,11 +22,7 @@ func (c *commandFsPwd) Help() string {
 
 func (c *commandFsPwd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-	fmt.Fprintf(writer, "http://%s:%d%s\n",
-		commandEnv.option.FilerHost,
-		commandEnv.option.FilerPort,
-		commandEnv.option.Directory,
-	)
+	fmt.Fprintf(writer, "%s\n", commandEnv.option.Directory)
 
 	return nil
 }
diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go
index a4524f341..a8c5b2018 100644
--- a/weed/shell/command_fs_tree.go
+++ b/weed/shell/command_fs_tree.go
@@ -1,13 +1,12 @@
 package shell
 
 import (
-	"context"
 	"fmt"
 	"io"
 	"strings"
 
-	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func init() {
@@ -24,22 +23,21 @@ func (c *commandFsTree) Name() string {
 
 func (c *commandFsTree) Help() string {
 	return `recursively list all files under a directory
 
-	fs.tree http://<filer_server>:<port>/dir/
+	fs.tree /some/dir
+
 `
 }
 
 func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-	filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+	path, err := commandEnv.parseUrl(findInputDirectory(args))
 	if err != nil {
 		return err
 	}
 
-	dir, name := filer2.FullPath(path).DirAndName()
-
-	ctx := context.Background()
+	dir, name := util.FullPath(path).DirAndName()
 
-	dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name, newPrefix(), -1)
+	dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv, util.FullPath(dir), name, newPrefix(), -1)
 
 	if terr == nil {
 		fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount)
@@ -49,14 +47,14 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ
 
 }
 
-func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
+func treeTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir util.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
 
 	prefix.addMarker(level)
 
-	err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) {
+	err = filer_pb.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) error {
 		if level < 0 && name != "" {
 			if entry.Name != name {
-				return
+				return nil
 			}
 		}
 
@@ -64,18 +62,15 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi
 
 		if entry.IsDirectory {
 			directoryCount++
-			subDir := fmt.Sprintf("%s/%s", dir, entry.Name)
-			if dir == "/" {
-				subDir = "/" + entry.Name
-			}
-			dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1)
+			subDir := dir.Child(entry.Name)
+			dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1)
 			directoryCount += dirCount
 			fileCount += fCount
 			err = terr
 		} else {
 			fileCount++
 		}
-
+		return nil
 	})
 	return
 }
diff --git a/weed/shell/command_s3_bucket_create.go b/weed/shell/command_s3_bucket_create.go
new file mode 100644
index 000000000..a512ffc4a
--- /dev/null
+++ b/weed/shell/command_s3_bucket_create.go
@@ -0,0 +1,85 @@
+package shell
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+	Commands = append(Commands, &commandS3BucketCreate{})
+}
+
+type commandS3BucketCreate struct {
+}
+
+func (c *commandS3BucketCreate) Name() string {
+	return "s3.bucket.create"
+}
+
+func (c *commandS3BucketCreate) Help() string {
+	return `create a bucket with a given name
+
+	Example:
+		s3.bucket.create -name <bucket_name> -replication 001
+`
+}
+
+func (c *commandS3BucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	bucketName := bucketCommand.String("name", "", "bucket name")
+	replication := bucketCommand.String("replication", "", "replication setting for the bucket, if not set "+
+		"it will honor the value defined by the filer if it exists, "+
+		"else it will honor the value defined on the master")
+	if err = bucketCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	if *bucketName == "" {
+		return fmt.Errorf("empty bucket name")
+	}
+
+	err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+		if err != nil {
+			return fmt.Errorf("get filer configuration: %v", err)
+		}
+		filerBucketsPath := resp.DirBuckets
+
+		println("create bucket under", filerBucketsPath)
+
+		entry := &filer_pb.Entry{
+			Name:        *bucketName,
+			IsDirectory: true,
+			Attributes: &filer_pb.FuseAttributes{
+				Mtime:       time.Now().Unix(),
+				Crtime:      time.Now().Unix(),
+				FileMode:    uint32(0777 | os.ModeDir),
+				Collection:  *bucketName,
+				Replication: *replication,
+			},
+		}
+
+		if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+			Directory: filerBucketsPath,
+			Entry:     entry,
+		}); err != nil {
+			return err
+		}
+
+		println("created bucket", *bucketName)
+
+		return nil
+
+	})
+
+	return err
+
+}
diff --git a/weed/shell/command_s3_bucket_delete.go b/weed/shell/command_s3_bucket_delete.go
new file mode 100644
index 000000000..a8d8c5c29
--- /dev/null
+++ b/weed/shell/command_s3_bucket_delete.go
@@ -0,0 +1,54 @@
+package shell
+
+import (
+	"flag"
+	"fmt"
+	"io"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+	Commands = append(Commands, &commandS3BucketDelete{})
+}
+
+type commandS3BucketDelete struct {
+}
+
+func (c *commandS3BucketDelete) Name() string {
+	return "s3.bucket.delete"
+}
+
+func (c *commandS3BucketDelete) Help() string {
+	return `delete a bucket by a given name
+
+	s3.bucket.delete -name <bucket_name>
+`
+}
+
+func (c *commandS3BucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	bucketName := bucketCommand.String("name", "", "bucket name")
+	if err = bucketCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	if *bucketName == "" {
+		return fmt.Errorf("empty bucket name")
+	}
+
+	_, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+	if parseErr != nil {
+		return parseErr
+	}
+
+	var filerBucketsPath string
+	filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+	if err != nil {
+		return fmt.Errorf("read buckets: %v", err)
+	}
+
+	return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true, false, nil)
+
+}
diff --git a/weed/shell/command_s3_bucket_list.go b/weed/shell/command_s3_bucket_list.go
new file mode 100644
index 000000000..4acf9a866
--- /dev/null
+++ b/weed/shell/command_s3_bucket_list.go
@@ -0,0 +1,78 @@
+package shell
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+	Commands = append(Commands, &commandS3BucketList{})
+}
+
+type commandS3BucketList struct {
+}
+
+func (c *commandS3BucketList) Name() string {
+	return "s3.bucket.list"
+}
+
+func (c *commandS3BucketList) Help() string {
+	return `list all buckets
+
+`
+}
+
+func (c *commandS3BucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	if err = bucketCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	_, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+	if parseErr != nil {
+		return parseErr
+	}
+
+	var filerBucketsPath string
+	filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+	if err != nil {
+		return fmt.Errorf("read buckets: %v", err)
+	}
+
+	err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
+		if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" {
+			fmt.Fprintf(writer, "  %s\n", entry.Name)
+		} else {
+			fmt.Fprintf(writer, "  %s\t\t\treplication: %s\n", entry.Name, entry.Attributes.Replication)
+		}
+		return nil
+	}, "", false, math.MaxUint32)
+	if err != nil {
+		return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+	}
+
+	return err
+
+}
+
+func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath string, err error) {
+	err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+		if err != nil {
+			return fmt.Errorf("get filer configuration: %v", err)
+		}
+		filerBucketsPath = resp.DirBuckets
+
+		return nil
+
+	})
+
+	return filerBucketsPath, err
+}
diff --git a/weed/shell/command_s3_clean_uploads.go b/weed/shell/command_s3_clean_uploads.go
new file mode 100644
index 000000000..1ba31292c
--- /dev/null
+++ b/weed/shell/command_s3_clean_uploads.go
@@ -0,0 +1,92 @@
+package shell
+
+import (
+	"flag"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"io"
+	"math"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+	Commands = append(Commands, &commandS3CleanUploads{})
+}
+
+type commandS3CleanUploads struct {
+}
+
+func (c *commandS3CleanUploads) Name() string {
+	return "s3.clean.uploads"
+}
+
+func (c *commandS3CleanUploads) Help() string {
+	return `clean up stale multipart uploads
+
+	Example:
+		s3.clean.uploads -timeAgo 1.5h
+
+`
+}
+
+func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	uploadedTimeAgo := bucketCommand.Duration("timeAgo", 24*time.Hour, "created time before now. \"1.5h\" or \"2h45m\". Valid time units are \"m\", \"h\"")
+	if err = bucketCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	var filerBucketsPath string
+	filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+	if err != nil {
+		return fmt.Errorf("read buckets: %v", err)
+	}
+
+	var buckets []string
+	err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
+		buckets = append(buckets, entry.Name)
+		return nil
+	}, "", false, math.MaxUint32)
+	if err != nil {
+		return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+	}
+
+	for _, bucket := range buckets {
+		c.cleanupUploads(commandEnv, writer, filerBucketsPath, bucket, *uploadedTimeAgo)
+	}
+
+	return err
+
+}
+
+func (c *commandS3CleanUploads) cleanupUploads(commandEnv *CommandEnv, writer io.Writer, filerBucketsPath string, bucket string, timeAgo time.Duration) error {
+	uploadsDir := filerBucketsPath + "/" + bucket + "/.uploads"
+	var staleUploads []string
+	now := time.Now()
+	err := filer_pb.List(commandEnv, uploadsDir, "", func(entry *filer_pb.Entry, isLast bool) error {
+		ctime := time.Unix(entry.Attributes.Crtime, 0)
+		if ctime.Add(timeAgo).Before(now) {
+			staleUploads = append(staleUploads, entry.Name)
+		}
+		return nil
+	}, "", false, math.MaxUint32)
+	if err != nil {
+		return fmt.Errorf("list uploads under %v: %v", uploadsDir, err)
+	}
+
+	for _, staleUpload := range staleUploads {
+		deleteUrl := fmt.Sprintf("http://%s:%d%s/%s?recursive=true&ignoreRecursiveError=true", commandEnv.option.FilerHost, commandEnv.option.FilerPort, uploadsDir, staleUpload)
+		fmt.Fprintf(writer, "purge %s\n", deleteUrl)
+
+		err = util.Delete(deleteUrl, "")
+		if err != nil {
+			return fmt.Errorf("purge %s/%s: %v", uploadsDir, staleUpload, err)
+		}
+	}
+
+	return nil
+
+}
diff --git a/weed/shell/command_s3_configure.go b/weed/shell/command_s3_configure.go
new file mode 100644
index 000000000..ca51ef72f
--- /dev/null
+++ b/weed/shell/command_s3_configure.go
@@ -0,0 +1,183 @@
+package shell
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"io"
+	"sort"
+	"strings"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+func init() {
+	Commands = append(Commands, &commandS3Configure{})
+}
+
+type commandS3Configure struct {
+}
+
+func (c *commandS3Configure) Name() string {
+	return "s3.configure"
+}
+
+func (c *commandS3Configure) Help() string {
+	return `configure and apply s3 options for each bucket
+
+	# see the current configuration file content
+	s3.configure
+	`
+}
+
+func (c *commandS3Configure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	s3ConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	actions := s3ConfigureCommand.String("actions", "", "comma-separated action names: Read,Write,List,Tagging,Admin")
+	user := s3ConfigureCommand.String("user", "", "user name")
+	buckets := s3ConfigureCommand.String("buckets", "", "bucket name")
+	accessKey := s3ConfigureCommand.String("access_key", "", "specify the access key")
+	secretKey := s3ConfigureCommand.String("secret_key", "", "specify the secret key")
+	isDelete := s3ConfigureCommand.Bool("delete", false, "delete users, actions or access keys")
+	apply := s3ConfigureCommand.Bool("apply", false, "update and apply s3 configuration")
+
+	if err = s3ConfigureCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	if err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		return filer.ReadEntry(commandEnv.MasterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf)
+	}); err != nil && err != filer_pb.ErrNotFound {
+		return err
+	}
+
+	s3cfg := &iam_pb.S3ApiConfiguration{}
+	if buf.Len() > 0 {
+		if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil {
+			return err
+		}
+	}
+
+	idx := 0
+	changed := false
+	if *user != "" {
+		for i, identity := range s3cfg.Identities {
+			if *user == identity.Name {
+				idx = i
+				changed = true
+				break
+			}
+		}
+	}
+	var cmdActions []string
+	for _, action := range strings.Split(*actions, ",") {
+		if *buckets == "" {
+			cmdActions = append(cmdActions, action)
+		} else {
+			for _, bucket := range strings.Split(*buckets, ",") {
+				cmdActions = append(cmdActions, fmt.Sprintf("%s:%s", action, bucket))
+			}
+		}
+	}
+	if changed {
+		if *isDelete {
+			var exists []int
+			for _, cmdAction := range cmdActions {
+				for i, currentAction := range s3cfg.Identities[idx].Actions {
+					if cmdAction == currentAction {
+						exists = append(exists, i)
+					}
+				}
+			}
+			sort.Sort(sort.Reverse(sort.IntSlice(exists)))
+			for _, i := range exists {
+				s3cfg.Identities[idx].Actions = append(
+					s3cfg.Identities[idx].Actions[:i],
+					s3cfg.Identities[idx].Actions[i+1:]...,
+				)
+			}
+			if *accessKey != "" {
+				exists = []int{}
+				for i, credential := range s3cfg.Identities[idx].Credentials {
+					if credential.AccessKey == *accessKey {
+						exists = append(exists, i)
+					}
+				}
+				sort.Sort(sort.Reverse(sort.IntSlice(exists)))
+				for _, i := range exists {
+					s3cfg.Identities[idx].Credentials = append(
+						s3cfg.Identities[idx].Credentials[:i],
+						s3cfg.Identities[idx].Credentials[i+1:]...,
+					)
+				}
+
+			}
+			if *actions == "" && *accessKey == "" && *buckets == "" {
+				s3cfg.Identities = append(s3cfg.Identities[:idx], s3cfg.Identities[idx+1:]...)
+			}
+		} else {
+			if *actions != "" {
+				for _, cmdAction := range cmdActions {
+					found := false
+					for _, action := range s3cfg.Identities[idx].Actions {
+						if cmdAction == action {
+							found = true
+							break
+						}
+					}
+					if !found {
+						s3cfg.Identities[idx].Actions = append(s3cfg.Identities[idx].Actions, cmdAction)
+					}
+				}
+			}
+			if *accessKey != "" && *user != "anonymous" {
+				found := false
+				for _, credential := range s3cfg.Identities[idx].Credentials {
+					if credential.AccessKey == *accessKey {
+						found = true
+						credential.SecretKey = *secretKey
+						break
+					}
+				}
+				if !found {
+					s3cfg.Identities[idx].Credentials = append(s3cfg.Identities[idx].Credentials, &iam_pb.Credential{
+						AccessKey: *accessKey,
+						SecretKey: *secretKey,
+					})
+				}
+			}
+		}
+	} else if *user != "" && *actions != "" {
+		identity := iam_pb.Identity{
+			Name:        *user,
+			Actions:     cmdActions,
+			Credentials: []*iam_pb.Credential{},
+		}
+		if *user != "anonymous" {
+			identity.Credentials = append(identity.Credentials,
+				&iam_pb.Credential{AccessKey: *accessKey, SecretKey: *secretKey})
+		}
+		s3cfg.Identities = append(s3cfg.Identities, &identity)
+	}
+
+	buf.Reset()
+	filer.S3ConfigurationToText(&buf, s3cfg)
+
+	fmt.Fprintf(writer, string(buf.Bytes()))
+	fmt.Fprintln(writer)
+
+	if *apply {
+
+		if err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+			return filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes())
+		}); err != nil {
+			return err
+		}
+
+	}
+
+	return nil
+}
diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go
index bed4f4306..ad7da0e44 100644
--- a/weed/shell/command_volume_balance.go
+++ b/weed/shell/command_volume_balance.go
@@ -1,9 +1,10 @@
 package shell
 
 import (
-	"context"
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 	"os"
 	"sort"
@@ -39,14 +40,15 @@ func (c *commandVolumeBalance) Help() string {
 
 	func balanceWritableVolumes(){
-		idealWritableVolumes = totalWritableVolumes / numVolumeServers
+		idealWritableVolumeRatio = totalWritableVolumes / totalNumberOfMaxVolumes
 		for hasMovedOneVolume {
-			sort all volume servers ordered by the number of local writable volumes
-			pick the volume server A with the lowest number of writable volumes x
-			pick the volume server B with the highest number of writable volumes y
-			if y > idealWritableVolumes and x +1 <= idealWritableVolumes {
-				if B has a writable volume id v that A does not have {
-					move writable volume v from A to B
+			sort all volume servers ordered by the localWritableVolumeRatio = localWritableVolumes to localVolumeMax
+			pick the volume server B with the highest localWritableVolumeRatio y
+			for any volume server A with the number of writable volumes x + 1 <= idealWritableVolumeRatio * localVolumeMax {
+				if y > A's localWritableVolumeRatio {
+					if B has a writable volume id v that A does not have, and v satisfies its replication requirements {
+						move writable volume v from B to A
+					}
 				}
 			}
 		}
@@ -60,6 +62,10 @@ func (c *commandVolumeBalance) Help() string {
 
 func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
+	if err = commandEnv.confirmIsLocked(); err != nil {
+		return
+	}
+
 	balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or use \"ALL_COLLECTIONS\" across collections, \"EACH_COLLECTION\" for each collection")
 	dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
@@ -68,55 +74,51 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
 		return nil
 	}
 
-	var resp *master_pb.VolumeListResponse
-	ctx := context.Background()
-	err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
 
-	typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc)
+	volumeServers := collectVolumeServersByDc(topologyInfo, *dc)
+	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+	diskTypes := collectVolumeDiskTypes(topologyInfo)
 
-	for maxVolumeCount, volumeServers := range typeToNodes {
-		if len(volumeServers) < 2 {
-			fmt.Printf("only 1 node is configured max %d volumes, skipping balancing\n", maxVolumeCount)
-			continue
+	if *collection == "EACH_COLLECTION" {
+		collections, err := ListCollectionNames(commandEnv, true, false)
+		if err != nil {
+			return err
 		}
-		if *collection == "EACH_COLLECTION" {
-			collections, err := ListCollectionNames(commandEnv, true, false)
-			if err != nil {
-				return err
-			}
-			for _, c := range collections {
-				if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
-					return err
-				}
-			}
-		} else if *collection == "ALL_COLLECTIONS" {
-			if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
-				return err
-			}
-		} else {
-			if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
+		for _, c := range collections {
+			if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
 				return err
 			}
 		}
-
+	} else if *collection == "ALL_COLLECTIONS" {
+		if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
+			return err
+		}
+	} else {
+		if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
+			return err
+		}
 	}
+
 	return nil
 }
 
-func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
+func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
 
-	var nodes []*Node
-	for _, dn := range dataNodeInfos {
-		nodes = append(nodes, &Node{
-			info: dn,
-		})
+	for _, diskType := range diskTypes {
+		if err := balanceVolumeServersByDiskType(commandEnv, diskType, volumeReplicas, nodes, volumeSizeLimit, collection, applyBalancing); err != nil {
+			return err
+		}
 	}
+	return nil
+
+}
+
+func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
 
 	// balance writable volumes
 	for _, n := range nodes {
@@ -126,10 +128,10 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat
 				return false
 			}
 		}
-			return !v.ReadOnly && v.Size < volumeSizeLimit
+			return v.DiskType == string(diskType) && (!v.ReadOnly && v.Size < volumeSizeLimit)
 		})
 	}
-	if err := balanceSelectedVolume(commandEnv, nodes, sortWritableVolumes, applyBalancing); err != nil {
+	if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortWritableVolumes, applyBalancing); err != nil {
 		return err
 	}
 
@@ -141,34 +143,99 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat
 				return false
 			}
 		}
-			return v.ReadOnly || v.Size >= volumeSizeLimit
+			return v.DiskType == string(diskType) && (v.ReadOnly || v.Size >= volumeSizeLimit)
 		})
 	}
-	if err := balanceSelectedVolume(commandEnv, nodes, sortReadOnlyVolumes, applyBalancing); err != nil {
+	if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortReadOnlyVolumes, applyBalancing); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*master_pb.DataNodeInfo) {
-	typeToNodes = make(map[uint64][]*master_pb.DataNodeInfo)
+func collectVolumeServersByDc(t *master_pb.TopologyInfo, selectedDataCenter string) (nodes []*Node) {
 	for _, dc := range t.DataCenterInfos {
 		if selectedDataCenter != "" && dc.Id != selectedDataCenter {
 			continue
 		}
 		for _, r := range dc.RackInfos {
 			for _, dn := range r.DataNodeInfos {
-				typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], dn)
+				nodes = append(nodes, &Node{
+					info: dn,
+					dc:   dc.Id,
+					rack: r.Id,
+				})
 			}
 		}
 	}
 	return
 }
 
+func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes []types.DiskType) {
+	knownTypes := make(map[string]bool)
+	for _, dc := range t.DataCenterInfos {
+		for _, r := range dc.RackInfos {
+			for _, dn := range r.DataNodeInfos {
+				for diskType := range dn.DiskInfos {
+					if _, found := knownTypes[diskType]; !found {
+						knownTypes[diskType] = true
+					}
+				}
+			}
+		}
+	}
+	for diskType := range knownTypes {
+		diskTypes = append(diskTypes, types.ToDiskType(diskType))
+	}
+	return
+}
+
 type Node struct {
 	info            *master_pb.DataNodeInfo
 	selectedVolumes map[uint32]*master_pb.VolumeInformationMessage
+	dc              string
+	rack            string
+}
+
+type CapacityFunc func(*master_pb.DataNodeInfo) int
+
+func capacityByMaxVolumeCount(diskType types.DiskType) CapacityFunc {
+	return func(info *master_pb.DataNodeInfo) int {
+		diskInfo, found := info.DiskInfos[string(diskType)]
+		if !found {
+			return 0
+		}
+		return int(diskInfo.MaxVolumeCount)
+	}
+}
+
+func capacityByFreeVolumeCount(diskType types.DiskType) CapacityFunc {
+	return func(info *master_pb.DataNodeInfo) int {
+		diskInfo, found := info.DiskInfos[string(diskType)]
+		if !found {
+			return 0
+		}
+		return int(diskInfo.MaxVolumeCount - diskInfo.VolumeCount)
+	}
+}
+
+func (n *Node) localVolumeRatio(capacityFunc CapacityFunc) float64 {
+	return divide(len(n.selectedVolumes), capacityFunc(n.info))
+}
+
+func (n *Node) localVolumeNextRatio(capacityFunc CapacityFunc) float64 {
+	return divide(len(n.selectedVolumes)+1, capacityFunc(n.info))
+}
+
+func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
+	n.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
+	for _, diskInfo := range n.info.DiskInfos {
+		for _, v := range diskInfo.VolumeInfos {
+			if fn(v) {
+				n.selectedVolumes[v.Id] = v
+			}
+		}
+	}
 }
 
 func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
@@ -183,67 +250,152 @@ func sortReadOnlyVolumes(volumes []*master_pb.VolumeInformationMessage) {
 	})
 }
 
-func balanceSelectedVolume(commandEnv *CommandEnv, nodes []*Node, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) error {
-	selectedVolumeCount := 0
+func balanceSelectedVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, capacityFunc CapacityFunc, sortCandidatesFn func(volumes []*master_pb.VolumeInformationMessage), applyBalancing bool) (err error) {
+	selectedVolumeCount, volumeMaxCount := 0, 0
+	var nodesWithCapacity []*Node
 	for _, dn := range nodes {
 		selectedVolumeCount += len(dn.selectedVolumes)
+		capacity := capacityFunc(dn.info)
+		if capacity > 0 {
+			nodesWithCapacity = append(nodesWithCapacity, dn)
+		}
+		volumeMaxCount += capacity
 	}
 
-	idealSelectedVolumes := ceilDivide(selectedVolumeCount, len(nodes))
+	idealVolumeRatio := divide(selectedVolumeCount, volumeMaxCount)
+
+	hasMoved := true
 
-	hasMove := true
+	// fmt.Fprintf(os.Stdout, " total %d volumes, max %d volumes, idealVolumeRatio %f\n", selectedVolumeCount, volumeMaxCount, idealVolumeRatio)
 
-	for hasMove {
-		hasMove = false
-		sort.Slice(nodes, func(i, j int) bool {
-			// TODO sort by free volume slots???
-			return len(nodes[i].selectedVolumes) < len(nodes[j].selectedVolumes)
+	for hasMoved {
+		hasMoved = false
+		sort.Slice(nodesWithCapacity, func(i, j int) bool {
+			return nodesWithCapacity[i].localVolumeRatio(capacityFunc) < nodesWithCapacity[j].localVolumeRatio(capacityFunc)
 		})
 
-		emptyNode, fullNode := nodes[0], nodes[len(nodes)-1]
-		if len(fullNode.selectedVolumes) > idealSelectedVolumes && len(emptyNode.selectedVolumes)+1 <= idealSelectedVolumes {
-
-			// sort the volumes to move
-			var candidateVolumes []*master_pb.VolumeInformationMessage
-			for _, v := range fullNode.selectedVolumes {
-				candidateVolumes = append(candidateVolumes, v)
+		fullNode := nodesWithCapacity[len(nodesWithCapacity)-1]
+		var candidateVolumes []*master_pb.VolumeInformationMessage
+		for _, v := range fullNode.selectedVolumes {
+			candidateVolumes = append(candidateVolumes, v)
+		}
+		sortCandidatesFn(candidateVolumes)
+
+		for i := 0; i < len(nodesWithCapacity)-1; i++ {
+			emptyNode := nodesWithCapacity[i]
+			if !(fullNode.localVolumeRatio(capacityFunc) > idealVolumeRatio && emptyNode.localVolumeNextRatio(capacityFunc) <= idealVolumeRatio) {
+				// no more volume servers with empty slots
+				break
 			}
-			sortCandidatesFn(candidateVolumes)
-
-			for _, v := range candidateVolumes {
-				if _, found := emptyNode.selectedVolumes[v.Id]; !found {
-					if err := moveVolume(commandEnv, v, fullNode, emptyNode, applyBalancing); err == nil {
-						delete(fullNode.selectedVolumes, v.Id)
-						emptyNode.selectedVolumes[v.Id] = v
-						hasMove = true
-						break
-					} else {
-						return err
-					}
-				}
+			hasMoved, err = attemptToMoveOneVolume(commandEnv, volumeReplicas, fullNode, candidateVolumes, emptyNode, applyBalancing)
+			if err != nil {
+				return
+			}
+			if hasMoved {
+				// moved one volume
+				break
 			}
 		}
 	}
 	return nil
 }
 
-func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, fullNode *Node, emptyNode *Node, applyBalancing bool) error {
+func attemptToMoveOneVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, fullNode *Node, candidateVolumes []*master_pb.VolumeInformationMessage, emptyNode *Node, applyBalancing bool) (hasMoved bool, err error) {
+
+	for _, v := range candidateVolumes {
+		hasMoved, err = maybeMoveOneVolume(commandEnv, volumeReplicas, fullNode, v, emptyNode, applyBalancing)
+		if err != nil {
+			return
+		}
+		if hasMoved {
+			break
+		}
+	}
+	return
+}
+
+func maybeMoveOneVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, fullNode *Node, candidateVolume *master_pb.VolumeInformationMessage, emptyNode *Node, applyChange bool) (hasMoved bool, err error) {
+
+	if candidateVolume.ReplicaPlacement > 0 {
+		replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(candidateVolume.ReplicaPlacement))
+		if !isGoodMove(replicaPlacement, volumeReplicas[candidateVolume.Id], fullNode, emptyNode) {
+			return false, nil
+		}
+	}
+	if _, found := emptyNode.selectedVolumes[candidateVolume.Id]; !found {
+		if err = moveVolume(commandEnv, candidateVolume, fullNode, emptyNode, applyChange); err == nil {
+			adjustAfterMove(candidateVolume, volumeReplicas, fullNode, emptyNode)
+			return true, nil
+		} else {
+			return
+		}
+	}
+	return
+}
+
+func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, fullNode *Node, emptyNode *Node, applyChange bool) error {
 	collectionPrefix := v.Collection + "_"
 	if v.Collection == "" {
 		collectionPrefix = ""
 	}
-	fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
-	if applyBalancing {
-		ctx := context.Background()
-		return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second)
+	fmt.Fprintf(os.Stdout, "  moving %s volume %s%d %s => %s\n", v.DiskType, collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
+	if applyChange {
+		return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second, v.DiskType)
 	}
 	return nil
 }
 
-func (node *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
-	node.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
-	for _, v := range node.info.VolumeInfos {
-		if fn(v) {
-			node.selectedVolumes[v.Id] = v
+func isGoodMove(placement *super_block.ReplicaPlacement, existingReplicas []*VolumeReplica, sourceNode, targetNode *Node) bool {
+	for _, replica := range existingReplicas {
+		if replica.location.dataNode.Id == targetNode.info.Id &&
+			replica.location.rack == targetNode.rack &&
+			replica.location.dc == targetNode.dc {
+			// never move to existing nodes
+			return false
+		}
+	}
+	dcs, racks := make(map[string]bool), make(map[string]int)
+	for _, replica := range existingReplicas {
+		if replica.location.dataNode.Id != sourceNode.info.Id {
+			dcs[replica.location.DataCenter()] = true
+			racks[replica.location.Rack()]++
+		}
+	}
+
+	dcs[targetNode.dc] = true
+	racks[fmt.Sprintf("%s %s", targetNode.dc, targetNode.rack)]++
+
+	if len(dcs) != placement.DiffDataCenterCount+1 {
+		return false
+	}
+
+	if len(racks) != placement.DiffRackCount+placement.DiffDataCenterCount+1 {
+		return false
+	}
+
+	for _, sameRackCount := range racks {
+		if sameRackCount != placement.SameRackCount+1 {
+			return false
+		}
+	}
+
+	return true
+
+}
+
+func adjustAfterMove(v *master_pb.VolumeInformationMessage, volumeReplicas map[uint32][]*VolumeReplica, fullNode *Node, emptyNode *Node) {
+	delete(fullNode.selectedVolumes, v.Id)
+	if emptyNode.selectedVolumes != nil {
+		emptyNode.selectedVolumes[v.Id] = v
+	}
+	existingReplicas := volumeReplicas[v.Id]
+	for _, replica := range existingReplicas {
+		if replica.location.dataNode.Id == fullNode.info.Id &&
+			replica.location.rack == fullNode.rack &&
+			replica.location.dc == fullNode.dc {
+			loc := newLocation(emptyNode.dc, emptyNode.rack, emptyNode.info)
+			replica.location = &loc
+			return
 		}
 	}
 }
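Note on the replica placement arithmetic: isGoodMove above reduces a replication setting to three counts and checks that the data centers and racks that would exist after a move still match them. A minimal standalone sketch of that decomposition, using only the super_block API already imported by this file (NewReplicaPlacementFromString, GetCopyCount, and the three exported count fields); the sample strings are illustrative, not taken from any particular cluster:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

func main() {
	// A replication string "xyz" encodes x extra data centers, y extra
	// racks, and z extra copies within the same rack. After a candidate
	// move, isGoodMove requires:
	//   distinct data centers == DiffDataCenterCount + 1
	//   distinct racks        == DiffRackCount + DiffDataCenterCount + 1
	//   copies in each rack   == SameRackCount + 1
	for _, s := range []string{"000", "001", "010", "100", "110"} {
		rp, err := super_block.NewReplicaPlacementFromString(s)
		if err != nil {
			fmt.Println(s, "parse error:", err)
			continue
		}
		fmt.Printf("%s => diffDC:%d diffRack:%d sameRack:%d copies:%d\n",
			s, rp.DiffDataCenterCount, rp.DiffRackCount, rp.SameRackCount, rp.GetCopyCount())
	}
}

For example, "010" yields one extra rack and two copies in total, so a valid move must keep both replicas in one data center but in two different racks; that is exactly what the "010" cases in the test file below verify.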
diff --git a/weed/shell/command_volume_balance_test.go b/weed/shell/command_volume_balance_test.go
new file mode 100644
index 000000000..b77811f51
--- /dev/null
+++ b/weed/shell/command_volume_balance_test.go
@@ -0,0 +1,183 @@
+package shell
+
+import (
+	"testing"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+type testMoveCase struct {
+	name           string
+	replication    string
+	replicas       []*VolumeReplica
+	sourceLocation location
+	targetLocation location
+	expected       bool
+}
+
+func TestIsGoodMove(t *testing.T) {
+
+	var tests = []testMoveCase{
+
+		{
+			name:        "test 100 move to wrong data centers",
+			replication: "100",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+			targetLocation: location{"dc2", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:       false,
+		},
+
+		{
+			name:        "test 100 move to spread into proper data centers",
+			replication: "100",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:       true,
+		},
+
+		{
+			name:        "test move to the same node",
+			replication: "001",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+			expected:       false,
+		},
+
+		{
+			name:        "test move to the same rack, but existing node",
+			replication: "001",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+			expected:       false,
+		},
+
+		{
+			name:        "test move to the same rack, a new node",
+			replication: "001",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:       true,
+		},
+
+		{
+			name:        "test 010 move all to the same rack",
+			replication: "010",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:       false,
+		},
+
+		{
+			name:        "test 010 move to spread racks",
+			replication: "010",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:       true,
+		},
+
+		{
+			name:        "test 010 move to a new node within the same rack",
+			replication: "010",
+			replicas: []*VolumeReplica{
+				{
+					location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+				},
+				{
+					location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+				},
+			},
+			sourceLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+			targetLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+			expected:       true,
+		},
+	}
+
+	for _, tt := range tests {
+		replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
+		println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
+		sourceNode := &Node{
+			info: tt.sourceLocation.dataNode,
+			dc:   tt.sourceLocation.dc,
+			rack: tt.sourceLocation.rack,
+		}
+		targetNode := &Node{
+			info: tt.targetLocation.dataNode,
+			dc:   tt.targetLocation.dc,
+			rack: tt.targetLocation.rack,
+		}
+		if isGoodMove(replicaPlacement, tt.replicas, sourceNode, targetNode) != tt.expected {
+			t.Errorf("%s: expect %v move from %v to %s, replication:%v",
+				tt.name, tt.expected, tt.sourceLocation, tt.targetLocation, tt.replication)
+		}
+	}
+
+}
+
+func TestBalance(t *testing.T) {
+	topologyInfo := parseOutput(topoData)
+	volumeServers := collectVolumeServersByDc(topologyInfo, "")
+	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+	diskTypes := collectVolumeDiskTypes(topologyInfo)
+
+	if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, 30*1024*1024*1024, "ALL_COLLECTIONS", false); err != nil {
+		t.Errorf("balance: %v", err)
+	}
+
+}
diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go
new file mode 100644
index 000000000..0f156ac2f
--- /dev/null
+++ b/weed/shell/command_volume_check_disk.go
@@ -0,0 +1,258 @@
+package shell
+
+import (
+	"bytes"
+	"context"
+	"flag"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+	"io"
+	"math"
+	"sort"
+)
+
+func init() {
+	Commands = append(Commands, &commandVolumeCheckDisk{})
+}
+
+type commandVolumeCheckDisk struct {
+	env *CommandEnv
+}
+
+func (c *commandVolumeCheckDisk) Name() string {
+	return "volume.check.disk"
+}
+
+func (c *commandVolumeCheckDisk) Help() string {
+	return `check all replicated volumes to find and fix inconsistencies
+
+	How it works:
+
+	find all volumes that are replicated
+	  for each volume id, if there are 2 or more replicas, find one pair with the largest 2 in file count.
+  for the pair volume A and B
+    append entries in A and not in B to B
+    append entries in B and not in A to A
+
+`
+}
+
+func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+    if err = commandEnv.confirmIsLocked(); err != nil {
+        return
+    }
+
+    fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even when the file counts are the same")
+    verbose := fsckCommand.Bool("v", false, "verbose mode")
+    applyChanges := fsckCommand.Bool("force", false, "apply the fix")
+    nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair only when the fraction of missing keys is not more than this limit")
+    if err = fsckCommand.Parse(args); err != nil {
+        return nil
+    }
+
+    c.env = commandEnv
+
+    // collect topology information
+    topologyInfo, _, err := collectTopologyInfo(commandEnv)
+    if err != nil {
+        return err
+    }
+    volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+
+    // pick one pair of volume replicas at a time
+    fileCount := func(replica *VolumeReplica) uint64 {
+        return replica.info.FileCount - replica.info.DeleteCount
+    }
+    aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
+    defer aDB.Close()
+    defer bDB.Close()
+
+    for _, replicas := range volumeReplicas {
+        sort.Slice(replicas, func(i, j int) bool {
+            return fileCount(replicas[i]) > fileCount(replicas[j])
+        })
+        for len(replicas) >= 2 {
+            a, b := replicas[0], replicas[1]
+            if !*slowMode {
+                if fileCount(a) == fileCount(b) {
+                    replicas = replicas[1:]
+                    continue
+                }
+            }
+            if a.info.ReadOnly || b.info.ReadOnly {
+                fmt.Fprintf(writer, "skipping readonly volume %d on %s and %s\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
+                replicas = replicas[1:]
+                continue
+            }
+
+            // reset index db
+            aDB.Close()
+            bDB.Close()
+            aDB, bDB = needle_map.NewMemDb(), needle_map.NewMemDb()
+
+            // read index db
+            if err := c.readIndexDatabase(aDB, a.info.Collection, a.info.Id, a.location.dataNode.Id, *verbose, writer); err != nil {
+                return err
+            }
+            if err := c.readIndexDatabase(bDB, b.info.Collection, b.info.Id, b.location.dataNode.Id, *verbose, writer); err != nil {
+                return err
+            }
+
+            // find and make up the differences
+            if err := c.doVolumeCheckDisk(aDB, bDB, a, b, *verbose, writer, *applyChanges, *nonRepairThreshold); err != nil {
+                return err
+            }
+            if err := c.doVolumeCheckDisk(bDB, aDB, b, a, *verbose, writer, *applyChanges, *nonRepairThreshold); err != nil {
+                return err
+            }
+            replicas = replicas[1:]
+        }
+    }
+
+    return nil
+}
+
+func (c *commandVolumeCheckDisk) doVolumeCheckDisk(subtrahend, minuend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, nonRepairThreshold float64) error {
+
+    // find missing keys
+    // hash join, can be more efficient
+    var missingNeedles []needle_map.NeedleValue
+    var counter int
+    subtrahend.AscendingVisit(func(value needle_map.NeedleValue) error {
+        counter++
+        if _, found := minuend.Get(value.Key); !found {
+            missingNeedles = append(missingNeedles, value)
+        }
+        return nil
+    })
+
+    fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d entries\n", source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles))
+
+    if counter == 0 || len(missingNeedles) == 0 {
+        return nil
+    }
+
+    missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter)
+    if missingNeedlesFraction > nonRepairThreshold {
+        return fmt.Errorf(
+            "failed to start repair volume %d, percentage of missing
keys is greater than the threshold: %.2f > %.2f", + source.info.Id, missingNeedlesFraction, nonRepairThreshold) + } + + for _, needleValue := range missingNeedles { + + needleBlob, err := c.readSourceNeedleBlob(source.location.dataNode.Id, source.info.Id, needleValue) + if err != nil { + return err + } + + if !applyChanges { + continue + } + + if verbose { + fmt.Fprintf(writer, "read %d,%x %s => %s \n", source.info.Id, needleValue.Key, source.location.dataNode.Id, target.location.dataNode.Id) + } + + if err := c.writeNeedleBlobToTarget(target.location.dataNode.Id, source.info.Id, needleValue, needleBlob); err != nil { + return err + } + + } + + return nil +} + +func (c *commandVolumeCheckDisk) readSourceNeedleBlob(sourceVolumeServer string, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) { + + err = operation.WithVolumeServerClient(sourceVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{ + VolumeId: volumeId, + NeedleId: uint64(needleValue.Key), + Offset: needleValue.Offset.ToActualOffset(), + Size: int32(needleValue.Size), + }) + if err != nil { + return err + } + needleBlob = resp.NeedleBlob + return nil + }) + return +} + +func (c *commandVolumeCheckDisk) writeNeedleBlobToTarget(targetVolumeServer string, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error { + + return operation.WithVolumeServerClient(targetVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{ + VolumeId: volumeId, + NeedleId: uint64(needleValue.Key), + Size: int32(needleValue.Size), + NeedleBlob: needleBlob, + }) + return err + }) + +} + +func (c *commandVolumeCheckDisk) readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer string, verbose bool, writer io.Writer) error { + + var buf bytes.Buffer + if err := c.copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer); err != nil { + return err + } + + if verbose { + fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer) + } + + return db.LoadFromReaderAt(bytes.NewReader(buf.Bytes())) + +} + +func (c *commandVolumeCheckDisk) copyVolumeIndexFile(collection string, volumeId uint32, volumeServer string, buf *bytes.Buffer, verbose bool, writer io.Writer) error { + + return operation.WithVolumeServerClient(volumeServer, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + ext := ".idx" + + copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{ + VolumeId: volumeId, + Ext: ".idx", + CompactionRevision: math.MaxUint32, + StopOffset: math.MaxInt64, + Collection: collection, + IsEcVolume: false, + IgnoreSourceFileNotFound: false, + }) + if err != nil { + return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err) + } + + err = writeToBuffer(copyFileClient, buf) + if err != nil { + return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err) + } + + return nil + + }) +} + +func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error { + for { + resp, receiveErr := client.Recv() + if receiveErr == io.EOF { + break + } + if 
receiveErr != nil { + return fmt.Errorf("receiving: %v", receiveErr) + } + buf.Write(resp.FileContent) + } + return nil +} diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go new file mode 100644 index 000000000..e3f034873 --- /dev/null +++ b/weed/shell/command_volume_configure_replication.go @@ -0,0 +1,107 @@ +package shell + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +func init() { + Commands = append(Commands, &commandVolumeConfigureReplication{}) +} + +type commandVolumeConfigureReplication struct { +} + +func (c *commandVolumeConfigureReplication) Name() string { + return "volume.configure.replication" +} + +func (c *commandVolumeConfigureReplication) Help() string { + return `change volume replication value + + This command changes a volume replication value. It should be followed by "volume.fix.replication". + +` +} + +func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id") + replicationString := configureReplicationCommand.String("replication", "", "the intended replication value") + if err = configureReplicationCommand.Parse(args); err != nil { + return nil + } + + if *replicationString == "" { + return fmt.Errorf("empty replication value") + } + + replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString) + if err != nil { + return fmt.Errorf("replication format: %v", err) + } + replicaPlacementInt32 := uint32(replicaPlacement.Byte()) + + // collect topology information + topologyInfo, _, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + + vid := needle.VolumeId(*volumeIdInt) + + // find all data nodes with volumes that needs replication change + var allLocations []location + eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + loc := newLocation(dc, string(rack), dn) + for _, diskInfo := range dn.DiskInfos { + for _, v := range diskInfo.VolumeInfos { + if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 { + allLocations = append(allLocations, loc) + continue + } + } + } + }) + + if len(allLocations) == 0 { + return fmt.Errorf("no volume needs change") + } + + for _, dst := range allLocations { + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, configureErr := volumeServerClient.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{ + VolumeId: uint32(vid), + Replication: replicaPlacement.String(), + }) + if configureErr != nil { + return configureErr + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil + }) + + if err != nil { + return err + } + + } + + return nil +} diff --git a/weed/shell/command_volume_copy.go b/weed/shell/command_volume_copy.go index 1c83ba655..85b4bb095 100644 --- a/weed/shell/command_volume_copy.go +++ b/weed/shell/command_volume_copy.go @@ 
-1,7 +1,7 @@ package shell import ( - "context" + "flag" "fmt" "io" @@ -22,7 +22,7 @@ func (c *commandVolumeCopy) Name() string { func (c *commandVolumeCopy) Help() string { return `copy a volume from one volume server to another volume server - volume.copy <source volume server host:port> <target volume server host:port> <volume id> + volume.copy -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id> This command copies a volume from one volume server to another volume server. Usually you will want to unmount the volume first before copying. @@ -32,22 +32,26 @@ func (c *commandVolumeCopy) Help() string { func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - if len(args) != 3 { - fmt.Fprintf(writer, "received args: %+v\n", args) - return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>") + if err = commandEnv.confirmIsLocked(); err != nil { + return } - sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2] - volumeId, err := needle.NewVolumeId(volumeIdString) - if err != nil { - return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) + volCopyCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := volCopyCommand.Int("volumeId", 0, "the volume id") + sourceNodeStr := volCopyCommand.String("source", "", "the source volume server <host>:<port>") + targetNodeStr := volCopyCommand.String("target", "", "the target volume server <host>:<port>") + if err = volCopyCommand.Parse(args); err != nil { + return nil } + sourceVolumeServer, targetVolumeServer := *sourceNodeStr, *targetNodeStr + + volumeId := needle.VolumeId(*volumeIdInt) + if sourceVolumeServer == targetVolumeServer { return fmt.Errorf("source and target volume servers are the same!") } - ctx := context.Background() - _, err = copyVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) + _, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, "") return } diff --git a/weed/shell/command_volume_delete.go b/weed/shell/command_volume_delete.go index 17d27ea3a..187caa1a4 100644 --- a/weed/shell/command_volume_delete.go +++ b/weed/shell/command_volume_delete.go @@ -1,8 +1,7 @@ package shell import ( - "context" - "fmt" + "flag" "io" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -22,7 +21,7 @@ func (c *commandVolumeDelete) Name() string { func (c *commandVolumeDelete) Help() string { return `delete a live volume from one volume server - volume.delete <volume server host:port> <volume id> + volume.delete -node <volume server host:port> -volumeId <volume id> This command deletes a volume from one volume server. 
@@ -31,18 +30,21 @@ func (c *commandVolumeDelete) Help() string {
 
 func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-    if len(args) != 2 {
-        fmt.Fprintf(writer, "received args: %+v\n", args)
-        return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
+    if err = commandEnv.confirmIsLocked(); err != nil {
+        return
     }
-    sourceVolumeServer, volumeIdString := args[0], args[1]
-    volumeId, err := needle.NewVolumeId(volumeIdString)
-    if err != nil {
-        return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
+    volDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    volumeIdInt := volDeleteCommand.Int("volumeId", 0, "the volume id")
+    nodeStr := volDeleteCommand.String("node", "", "the volume server <host>:<port>")
+    if err = volDeleteCommand.Parse(args); err != nil {
+        return nil
     }
 
-    ctx := context.Background()
-    return deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+    sourceVolumeServer := *nodeStr
+
+    volumeId := needle.VolumeId(*volumeIdInt)
+
+    return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
 }
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
index 6f35dd5d2..538351fd0 100644
--- a/weed/shell/command_volume_fix_replication.go
+++ b/weed/shell/command_volume_fix_replication.go
@@ -2,9 +2,12 @@ package shell
 
 import (
     "context"
+    "flag"
     "fmt"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle"
+    "github.com/chrislusf/seaweedfs/weed/storage/types"
     "io"
-    "math/rand"
+    "path/filepath"
     "sort"
 
     "github.com/chrislusf/seaweedfs/weed/operation"
@@ -18,6 +21,7 @@ func init() {
 }
 
 type commandVolumeFixReplication struct {
+    collectionPattern *string
 }
 
 func (c *commandVolumeFixReplication) Name() string {
@@ -27,16 +31,19 @@ func (c *commandVolumeFixReplication) Name() string {
 func (c *commandVolumeFixReplication) Help() string {
     return `add replicas to volumes that are missing replicas
 
-    This command file all under-replicated volumes, and find volume servers with free slots.
+    This command finds all over-replicated volumes. If found, it will purge the oldest copies and stop.
+
+    This command also finds all under-replicated volumes, and finds volume servers with free slots.
     If the free slots satisfy the replication requirement, the volume content is copied over and mounted.
 
-    volume.fix.replication -n # do not take action
-    volume.fix.replication    # actually copying the volume files and mount the volume
+    volume.fix.replication -n                            # do not take action
+    volume.fix.replication                               # actually deleting or copying the volume files and mounting the volume
+    volume.fix.replication -collectionPattern=important* # fix any collections with prefix "important"
 
     Note:
-        * each time this will only add back one replica for one volume id. If there are multiple replicas
-        are missing, e.g. multiple volume servers are new, you may need to run this multiple times.
-        * do not run this too quick within seconds, since the new volume replica may take a few seconds
+        * each time this will only add back one replica for each volume id that is under replicated.
+          If multiple replicas are missing, e.g. the replica count is > 2, you may need to run this multiple times.
+        * do not run this too quickly within seconds, since the new volume replica may take a few seconds
         to register itself to the master.
` @@ -44,81 +51,151 @@ func (c *commandVolumeFixReplication) Help() string { func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - takeAction := true - if len(args) > 0 && args[0] == "-n" { - takeAction = false + if err = commandEnv.confirmIsLocked(); err != nil { + return } - var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) - return err - }) + volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.collectionPattern = volFixReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'") + skipChange := volFixReplicationCommand.Bool("n", false, "skip the changes") + if err = volFixReplicationCommand.Parse(args); err != nil { + return nil + } + + takeAction := !*skipChange + + // collect topology information + topologyInfo, _, err := collectTopologyInfo(commandEnv) if err != nil { return err } // find all volumes that needs replication // collect all data nodes - replicatedVolumeLocations := make(map[uint32][]location) - replicatedVolumeInfo := make(map[uint32]*master_pb.VolumeInformationMessage) + volumeReplicas, allLocations := collectVolumeReplicaLocations(topologyInfo) + + if len(allLocations) == 0 { + return fmt.Errorf("no data nodes at all") + } + + // find all under replicated volumes + var underReplicatedVolumeIds, overReplicatedVolumeIds []uint32 + for vid, replicas := range volumeReplicas { + replica := replicas[0] + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement)) + if replicaPlacement.GetCopyCount() > len(replicas) { + underReplicatedVolumeIds = append(underReplicatedVolumeIds, vid) + } else if replicaPlacement.GetCopyCount() < len(replicas) { + overReplicatedVolumeIds = append(overReplicatedVolumeIds, vid) + fmt.Fprintf(writer, "volume %d replication %s, but over replicated %+d\n", replica.info.Id, replicaPlacement, len(replicas)) + } + } + + if len(overReplicatedVolumeIds) > 0 { + return c.fixOverReplicatedVolumes(commandEnv, writer, takeAction, overReplicatedVolumeIds, volumeReplicas, allLocations) + } + + if len(underReplicatedVolumeIds) == 0 { + return nil + } + + // find the most under populated data nodes + return c.fixUnderReplicatedVolumes(commandEnv, writer, takeAction, underReplicatedVolumeIds, volumeReplicas, allLocations) + +} + +func collectVolumeReplicaLocations(topologyInfo *master_pb.TopologyInfo) (map[uint32][]*VolumeReplica, []location) { + volumeReplicas := make(map[uint32][]*VolumeReplica) var allLocations []location - eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { loc := newLocation(dc, string(rack), dn) - for _, v := range dn.VolumeInfos { - if v.ReplicaPlacement > 0 { - replicatedVolumeLocations[v.Id] = append(replicatedVolumeLocations[v.Id], loc) - replicatedVolumeInfo[v.Id] = v + for _, diskInfo := range dn.DiskInfos { + for _, v := range diskInfo.VolumeInfos { + volumeReplicas[v.Id] = append(volumeReplicas[v.Id], &VolumeReplica{ + location: &loc, + info: v, + }) } } allLocations = append(allLocations, loc) }) + return volumeReplicas, allLocations +} - // find all under replicated volumes - underReplicatedVolumeLocations := make(map[uint32][]location) - for vid, locations := range 
replicatedVolumeLocations { - volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) - if replicaPlacement.GetCopyCount() > len(locations) { - underReplicatedVolumeLocations[vid] = locations +func (c *commandVolumeFixReplication) fixOverReplicatedVolumes(commandEnv *CommandEnv, writer io.Writer, takeAction bool, overReplicatedVolumeIds []uint32, volumeReplicas map[uint32][]*VolumeReplica, allLocations []location) error { + for _, vid := range overReplicatedVolumeIds { + replicas := volumeReplicas[vid] + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replicas[0].info.ReplicaPlacement)) + + replica := pickOneReplicaToDelete(replicas, replicaPlacement) + + // check collection name pattern + if *c.collectionPattern != "" { + matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection) + if err != nil { + return fmt.Errorf("match pattern %s with collection %s: %v", *c.collectionPattern, replica.info.Collection, err) + } + if !matched { + break + } } - } - if len(underReplicatedVolumeLocations) == 0 { - return fmt.Errorf("no under replicated volumes") - } + fmt.Fprintf(writer, "deleting volume %d from %s ...\n", replica.info.Id, replica.location.dataNode.Id) + + if !takeAction { + break + } + + if err := deleteVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(replica.info.Id), replica.location.dataNode.Id); err != nil { + return fmt.Errorf("deleting volume %d from %s : %v", replica.info.Id, replica.location.dataNode.Id, err) + } - if len(allLocations) == 0 { - return fmt.Errorf("no data nodes at all") } + return nil +} - // find the most under populated data nodes - keepDataNodesSorted(allLocations) +func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *CommandEnv, writer io.Writer, takeAction bool, underReplicatedVolumeIds []uint32, volumeReplicas map[uint32][]*VolumeReplica, allLocations []location) error { - for vid, locations := range underReplicatedVolumeLocations { - volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + for _, vid := range underReplicatedVolumeIds { + replicas := volumeReplicas[vid] + replica := pickOneReplicaToCopyFrom(replicas) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement)) foundNewLocation := false + hasSkippedCollection := false + keepDataNodesSorted(allLocations, types.ToDiskType(replica.info.DiskType)) + fn := capacityByFreeVolumeCount(types.ToDiskType(replica.info.DiskType)) for _, dst := range allLocations { // check whether data nodes satisfy the constraints - if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, locations, dst) { + if fn(dst.dataNode) > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) { + // check collection name pattern + if *c.collectionPattern != "" { + matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection) + if err != nil { + return fmt.Errorf("match pattern %s with collection %s: %v", *c.collectionPattern, replica.info.Collection, err) + } + if !matched { + hasSkippedCollection = true + break + } + } + // ask the volume server to replicate the volume - sourceNodes := underReplicatedVolumeLocations[vid] - sourceNode := sourceNodes[rand.Intn(len(sourceNodes))] foundNewLocation = true - fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", volumeInfo.Id, 
replicaPlacement, sourceNode.dataNode.Id, dst.dataNode.Id) + fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", replica.info.Id, replicaPlacement, replica.location.dataNode.Id, dst.dataNode.Id) if !takeAction { break } err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ - VolumeId: volumeInfo.Id, - SourceDataNode: sourceNode.dataNode.Id, + _, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{ + VolumeId: replica.info.Id, + SourceDataNode: replica.location.dataNode.Id, }) - return replicateErr + if replicateErr != nil { + return fmt.Errorf("copying from %s => %s : %v", replica.location.dataNode.Id, dst.dataNode.Id, replicateErr) + } + return nil }) if err != nil { @@ -126,54 +203,152 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, } // adjust free volume count - dst.dataNode.FreeVolumeCount-- - keepDataNodesSorted(allLocations) + dst.dataNode.DiskInfos[replica.info.DiskType].FreeVolumeCount-- break } } - if !foundNewLocation { - fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", volumeInfo.Id, replicaPlacement, locations) + + if !foundNewLocation && !hasSkippedCollection { + fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", replica.info.Id, replicaPlacement, len(replicas)) } } - return nil } -func keepDataNodesSorted(dataNodes []location) { +func keepDataNodesSorted(dataNodes []location, diskType types.DiskType) { + fn := capacityByFreeVolumeCount(diskType) sort.Slice(dataNodes, func(i, j int) bool { - return dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount + return fn(dataNodes[i].dataNode) > fn(dataNodes[j].dataNode) }) } -func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { +/* + if on an existing data node { + return false + } + if different from existing dcs { + if lack on different dcs { + return true + }else{ + return false + } + } + if not on primary dc { + return false + } + if different from existing racks { + if lack on different racks { + return true + }else{ + return false + } + } + if not on primary rack { + return false + } + if lacks on same rack { + return true + } else { + return false + } +*/ +func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, replicas []*VolumeReplica, possibleLocation location) bool { + + existingDataCenters, _, existingDataNodes := countReplicas(replicas) + + if _, found := existingDataNodes[possibleLocation.String()]; found { + // avoid duplicated volume on the same data node + return false + } - existingDataCenters := make(map[string]bool) - existingRacks := make(map[string]bool) - existingDataNodes := make(map[string]bool) - for _, loc := range existingLocations { - existingDataCenters[loc.DataCenter()] = true - existingRacks[loc.Rack()] = true - existingDataNodes[loc.String()] = true + primaryDataCenters, _ := findTopKeys(existingDataCenters) + + // ensure data center count is within limit + if _, found := existingDataCenters[possibleLocation.DataCenter()]; !found { + // different from existing dcs + if len(existingDataCenters) < replicaPlacement.DiffDataCenterCount+1 { + // lack on different dcs + return true + } else { + // adding this 
would go over the different dcs limit + return false + } + } + // now this is same as one of the existing data center + if !isAmong(possibleLocation.DataCenter(), primaryDataCenters) { + // not on one of the primary dcs + return false } - if replicaPlacement.DiffDataCenterCount >= len(existingDataCenters) { - // check dc, good if different from any existing data centers - _, found := existingDataCenters[possibleLocation.DataCenter()] - return !found - } else if replicaPlacement.DiffRackCount >= len(existingRacks) { - // check rack, good if different from any existing racks - _, found := existingRacks[possibleLocation.Rack()] - return !found - } else if replicaPlacement.SameRackCount >= len(existingDataNodes) { - // check data node, good if different from any existing data nodes - _, found := existingDataNodes[possibleLocation.String()] - return !found + // now this is one of the primary dcs + primaryDcRacks := make(map[string]int) + for _, replica := range replicas { + if replica.location.DataCenter() != possibleLocation.DataCenter() { + continue + } + primaryDcRacks[replica.location.Rack()] += 1 + } + primaryRacks, _ := findTopKeys(primaryDcRacks) + sameRackCount := primaryDcRacks[possibleLocation.Rack()] + + // ensure rack count is within limit + if _, found := primaryDcRacks[possibleLocation.Rack()]; !found { + // different from existing racks + if len(primaryDcRacks) < replicaPlacement.DiffRackCount+1 { + // lack on different racks + return true + } else { + // adding this would go over the different racks limit + return false + } } + // now this is same as one of the existing racks + if !isAmong(possibleLocation.Rack(), primaryRacks) { + // not on the primary rack + return false + } + + // now this is on the primary rack + + // different from existing data nodes + if sameRackCount < replicaPlacement.SameRackCount+1 { + // lack on same rack + return true + } else { + // adding this would go over the same data node limit + return false + } + +} + +func findTopKeys(m map[string]int) (topKeys []string, max int) { + for k, c := range m { + if max < c { + topKeys = topKeys[:0] + topKeys = append(topKeys, k) + max = c + } else if max == c { + topKeys = append(topKeys, k) + } + } + return +} +func isAmong(key string, keys []string) bool { + for _, k := range keys { + if k == key { + return true + } + } return false } +type VolumeReplica struct { + location *location + info *master_pb.VolumeInformationMessage +} + type location struct { dc string rack string @@ -199,3 +374,45 @@ func (l location) Rack() string { func (l location) DataCenter() string { return l.dc } + +func pickOneReplicaToCopyFrom(replicas []*VolumeReplica) *VolumeReplica { + mostRecent := replicas[0] + for _, replica := range replicas { + if replica.info.ModifiedAtSecond > mostRecent.info.ModifiedAtSecond { + mostRecent = replica + } + } + return mostRecent +} + +func countReplicas(replicas []*VolumeReplica) (diffDc, diffRack, diffNode map[string]int) { + diffDc = make(map[string]int) + diffRack = make(map[string]int) + diffNode = make(map[string]int) + for _, replica := range replicas { + diffDc[replica.location.DataCenter()] += 1 + diffRack[replica.location.Rack()] += 1 + diffNode[replica.location.String()] += 1 + } + return +} + +func pickOneReplicaToDelete(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica { + + sort.Slice(replicas, func(i, j int) bool { + a, b := replicas[i], replicas[j] + if a.info.CompactRevision != b.info.CompactRevision { + return a.info.CompactRevision < 
b.info.CompactRevision + } + if a.info.ModifiedAtSecond != b.info.ModifiedAtSecond { + return a.info.ModifiedAtSecond < b.info.ModifiedAtSecond + } + if a.info.Size != b.info.Size { + return a.info.Size < b.info.Size + } + return false + }) + + return replicas[0] + +} diff --git a/weed/shell/command_volume_fix_replication_test.go b/weed/shell/command_volume_fix_replication_test.go new file mode 100644 index 000000000..bb61be1ef --- /dev/null +++ b/weed/shell/command_volume_fix_replication_test.go @@ -0,0 +1,273 @@ +package shell + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +type testcase struct { + name string + replication string + replicas []*VolumeReplica + possibleLocation location + expected bool +} + +func TestSatisfyReplicaPlacementComplicated(t *testing.T) { + + var tests = []testcase{ + { + name: "test 100 negative", + replication: "100", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + }, + possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + expected: false, + }, + { + name: "test 100 positive", + replication: "100", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + }, + possibleLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + expected: true, + }, + { + name: "test 022 positive", + replication: "022", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + { + location: &location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}}, + expected: true, + }, + { + name: "test 022 negative", + replication: "022", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + { + location: &location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}}, + }, + }, + possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}}, + expected: false, + }, + { + name: "test 210 moved from 200 positive", + replication: "210", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + { + location: &location{"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}}, + }, + }, + possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}}, + expected: true, + }, + { + name: "test 210 moved from 200 negative extra dc", + replication: "210", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + { + location: &location{"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}}, + }, + }, + possibleLocation: location{"dc4", "r4", &master_pb.DataNodeInfo{Id: "dn4"}}, + expected: false, + }, + { + name: "test 210 moved from 200 negative extra data node", + replication: "210", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + { + location: &location{"dc3", "r3", 
&master_pb.DataNodeInfo{Id: "dn3"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}}, + expected: false, + }, + } + + runTests(tests, t) + +} + +func TestSatisfyReplicaPlacement01x(t *testing.T) { + + var tests = []testcase{ + { + name: "test 011 same existing rack", + replication: "011", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + }, + possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}}, + expected: true, + }, + { + name: "test 011 negative", + replication: "011", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}}, + expected: false, + }, + { + name: "test 011 different existing racks", + replication: "011", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + }, + possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}}, + expected: true, + }, + { + name: "test 011 different existing racks negative", + replication: "011", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + }, + possibleLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}}, + expected: false, + }, + } + + runTests(tests, t) + +} + +func TestSatisfyReplicaPlacement00x(t *testing.T) { + + var tests = []testcase{ + { + name: "test 001", + replication: "001", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + expected: true, + }, + { + name: "test 002 positive", + replication: "002", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}}, + expected: true, + }, + { + name: "test 002 negative, repeat the same node", + replication: "002", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + expected: false, + }, + { + name: "test 002 negative, enough node already", + replication: "002", + replicas: []*VolumeReplica{ + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}}, + }, + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}}, + }, + { + location: &location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}}, + }, + }, + possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}}, + expected: false, + }, + } + + runTests(tests, t) + +} + +func runTests(tests []testcase, t *testing.T) { + for _, tt := range tests { + replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication) + println("replication:", tt.replication, "expected", 
tt.expected, "name:", tt.name)
+        if satisfyReplicaPlacement(replicaPlacement, tt.replicas, tt.possibleLocation) != tt.expected {
+            t.Errorf("%s: expect %v add %v to %s %+v",
+                tt.name, tt.expected, tt.possibleLocation, tt.replication, tt.replicas)
+        }
+    }
+}
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
new file mode 100644
index 000000000..1fbc9ad35
--- /dev/null
+++ b/weed/shell/command_volume_fsck.go
@@ -0,0 +1,372 @@
+package shell
+
+import (
+    "context"
+    "flag"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "math"
+    "os"
+    "path/filepath"
+    "sync"
+
+    "github.com/chrislusf/seaweedfs/weed/filer"
+    "github.com/chrislusf/seaweedfs/weed/operation"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+    "github.com/chrislusf/seaweedfs/weed/storage/types"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+    Commands = append(Commands, &commandVolumeFsck{})
+}
+
+type commandVolumeFsck struct {
+    env *CommandEnv
+}
+
+func (c *commandVolumeFsck) Name() string {
+    return "volume.fsck"
+}
+
+func (c *commandVolumeFsck) Help() string {
+    return `check all volumes to find entries not used by the filer
+
+    Important assumption!!!
+        the system is used by exactly one filer.
+
+    This command works this way:
+    1. collect all file ids from all volumes, as set A
+    2. collect all file ids from the filer, as set B
+    3. compute the set difference A minus B, i.e. the orphan entries
+
+`
+}
+
+func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+    if err = commandEnv.confirmIsLocked(); err != nil {
+        return
+    }
+
+    fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    verbose := fsckCommand.Bool("v", false, "verbose mode")
+    applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only> delete data not referenced by the filer")
+    if err = fsckCommand.Parse(args); err != nil {
+        return nil
+    }
+
+    c.env = commandEnv
+
+    // create a temp folder
+    tempFolder, err := ioutil.TempDir("", "sw_fsck")
+    if err != nil {
+        return fmt.Errorf("failed to create temp folder: %v", err)
+    }
+    if *verbose {
+        fmt.Fprintf(writer, "working directory: %s\n", tempFolder)
+    }
+    defer os.RemoveAll(tempFolder)
+
+    // collect all volume id locations
+    volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
+    if err != nil {
+        return fmt.Errorf("failed to collect all volume locations: %v", err)
+    }
+
+    // collect each volume file ids
+    for volumeId, vinfo := range volumeIdToVInfo {
+        err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
+        if err != nil {
+            return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+        }
+    }
+
+    // collect all filer file ids
+    if err = c.collectFilerFileIds(tempFolder, volumeIdToVInfo, *verbose, writer); err != nil {
+        return fmt.Errorf("failed to collect file ids from filer: %v", err)
+    }
+
+    // subtract the filer file ids from the volume file ids
+    var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
+    for volumeId, vinfo := range volumeIdToVInfo {
+        inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, *verbose)
+        if checkErr != nil {
+            return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
+        }
+        totalInUseCount += inUseCount
+        totalOrphanChunkCount += uint64(len(orphanFileIds))
+        totalOrphanDataSize += orphanDataSize
+
+        if *verbose {
+            for _, fid := range orphanFileIds {
+                fmt.Fprintf(writer, "%sxxxxxxxx\n", fid)
+            }
+        }
+
+        if *applyPurging && len(orphanFileIds) > 0 {
+            if vinfo.isEcVolume {
+                fmt.Fprintf(writer, "Skip purging for Erasure Coded volumes.\n")
+                continue
+            }
+            if err = c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+                return fmt.Errorf("purge for volume %d: %v", volumeId, err)
+            }
+        }
+    }
+
+    if totalOrphanChunkCount == 0 {
+        fmt.Fprintf(writer, "no orphan data\n")
+        return nil
+    }
+
+    if !*applyPurging {
+        pct := float64(totalOrphanChunkCount*100) / (float64(totalOrphanChunkCount + totalInUseCount))
+        fmt.Fprintf(writer, "\nTotal\t\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+            totalOrphanChunkCount+totalInUseCount, totalOrphanChunkCount, pct, totalOrphanDataSize)
+
+        fmt.Fprintf(writer, "This could be normal if multiple filers or no filers are used.\n")
+    }
+
+    return nil
+}
+
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+
+    if verbose {
+        fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
+    }
+
+    return operation.WithVolumeServerClient(vinfo.server, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+        ext := ".idx"
+        if vinfo.isEcVolume {
+            ext = ".ecx"
+        }
+
+        copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+            VolumeId:                 volumeId,
+            Ext:                      ext,
+            CompactionRevision:       math.MaxUint32,
+            StopOffset:               math.MaxInt64,
+            Collection:               vinfo.collection,
+            IsEcVolume:               vinfo.isEcVolume,
+            IgnoreSourceFileNotFound: false,
+        })
+        if err != nil {
+            return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
+        }
+
+        err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+        if err != nil {
+            return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
+        }
+
+        return nil
+
+    })
+
+}
+
+func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToServer map[uint32]VInfo, verbose bool, writer io.Writer) error {
+
+    if verbose {
+        fmt.Fprintf(writer, "collecting file ids from filer ...\n")
+    }
+
+    files := make(map[uint32]*os.File)
+    for vid := range volumeIdToServer {
+        dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+        if openErr != nil {
+            return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+        }
+        files[vid] = dst
+    }
+    defer func() {
+        for _, f := range files {
+            f.Close()
+        }
+    }()
+
+    type Item struct {
+        vid     uint32
+        fileKey uint64
+    }
+    return doTraverseBfsAndSaving(c.env, nil, "/", false, func(outputChan chan interface{}) {
+        buffer := make([]byte, 8)
+        for item := range outputChan {
+            i := item.(*Item)
+            util.Uint64toBytes(buffer, i.fileKey)
+            files[i.vid].Write(buffer)
+        }
+    }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+        dChunks, mChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks)
+        if resolveErr != nil {
+            return nil
+        }
+        dChunks = append(dChunks, mChunks...)
+ for _, chunk := range dChunks { + outputChan <- &Item{ + vid: chunk.Fid.VolumeId, + fileKey: chunk.Fid.FileKey, + } + } + return nil + }) +} + +func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) { + + db := needle_map.NewMemDb() + defer db.Close() + + if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil { + return + } + + filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId)) + if err != nil { + return + } + + dataLen := len(filerFileIdsData) + if dataLen%8 != 0 { + return 0, nil, 0, fmt.Errorf("filer data is corrupted") + } + + for i := 0; i < len(filerFileIdsData); i += 8 { + fileKey := util.BytesToUint64(filerFileIdsData[i : i+8]) + db.Delete(types.NeedleId(fileKey)) + inUseCount++ + } + + var orphanFileCount uint64 + db.AscendingVisit(func(n needle_map.NeedleValue) error { + // fmt.Printf("%d,%x\n", volumeId, n.Key) + orphanFileIds = append(orphanFileIds, fmt.Sprintf("%d,%s", volumeId, n.Key.String())) + orphanFileCount++ + orphanDataSize += uint64(n.Size) + return nil + }) + + if orphanFileCount > 0 { + pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount)) + fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n", + volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize) + } + + return + +} + +type VInfo struct { + server string + collection string + isEcVolume bool +} + +func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) { + + if verbose { + fmt.Fprintf(writer, "collecting volume id and locations from master ...\n") + } + + volumeIdToServer = make(map[uint32]VInfo) + // collect topology information + topologyInfo, _, err := collectTopologyInfo(commandEnv) + if err != nil { + return + } + + eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) { + for _, diskInfo := range t.DiskInfos { + for _, vi := range diskInfo.VolumeInfos { + volumeIdToServer[vi.Id] = VInfo{ + server: t.Id, + collection: vi.Collection, + isEcVolume: false, + } + } + for _, ecShardInfo := range diskInfo.EcShardInfos { + volumeIdToServer[ecShardInfo.Id] = VInfo{ + server: t.Id, + collection: ecShardInfo.Collection, + isEcVolume: true, + } + } + } + }) + + if verbose { + fmt.Fprintf(writer, "collected %d volumes and locations.\n", len(volumeIdToServer)) + } + return +} + +func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []string, writer io.Writer) (err error) { + fmt.Fprintf(writer, "purging orphan data for volume %d...\n", volumeId) + locations, found := c.env.MasterClient.GetLocations(volumeId) + if !found { + return fmt.Errorf("failed to find volume %d locations", volumeId) + } + + resultChan := make(chan []*volume_server_pb.DeleteResult, len(locations)) + var wg sync.WaitGroup + for _, location := range locations { + wg.Add(1) + go func(server string, fidList []string) { + defer wg.Done() + + if deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil { + err = deleteErr + } else if deleteResults != nil { + resultChan <- deleteResults + } + + }(location.Url, fileIds) + } + wg.Wait() + close(resultChan) + + for results := range resultChan { + for _, result := range results { + if result.Error != "" { + 
fmt.Fprintf(writer, "purge error: %s\n", result.Error)
+            }
+        }
+    }
+
+    return
+}
+
+func getVolumeFileIdFile(tempFolder string, vid uint32) string {
+    return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
+}
+
+func getFilerFileIdFile(tempFolder string, vid uint32) string {
+    return filepath.Join(tempFolder, fmt.Sprintf("%d.fid", vid))
+}
+
+func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error {
+    flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+    dst, err := os.OpenFile(fileName, flags, 0644)
+    if err != nil {
+        return err
+    }
+    defer dst.Close()
+
+    for {
+        resp, receiveErr := client.Recv()
+        if receiveErr == io.EOF {
+            break
+        }
+        if receiveErr != nil {
+            return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
+        }
+        dst.Write(resp.FileContent)
+    }
+    return nil
+}
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index c6c79d150..9856de10b 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -1,7 +1,7 @@
 package shell
 
 import (
-    "context"
+    "bytes"
     "fmt"
     "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
     "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -31,22 +31,35 @@ func (c *commandVolumeList) Help() string {
 
 func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-    var resp *master_pb.VolumeListResponse
-    ctx := context.Background()
-    err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
-        resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
-        return err
-    })
+    // collect topology information
+    topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
     if err != nil {
         return err
     }
 
-    writeTopologyInfo(writer, resp.TopologyInfo, resp.VolumeSizeLimitMb)
+    writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb)
 
     return nil
 }
 
+func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
+    var buf bytes.Buffer
+    for diskType, diskInfo := range diskInfos {
+        if diskType == "" {
+            diskType = "hdd"
+        }
+        fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
+    }
+    return buf.String()
+}
+
+func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
+    var buf bytes.Buffer
+    fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
+    return buf.String()
+}
+
 func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics {
-    fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb)
+    fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
     sort.Slice(t.DataCenterInfos, func(i, j int) bool {
         return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id
     })
@@ -58,7 +71,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi
     return s
 }
 func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics {
-    fmt.Fprintf(writer, "  DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+ fmt.Fprintf(writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos)) var s statistics sort.Slice(t.RackInfos, func(i, j int) bool { return t.RackInfos[i].Id < t.RackInfos[j].Id @@ -70,7 +83,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti return s } func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { - fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) + fmt.Fprintf(writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos)) var s statistics sort.Slice(t.DataNodeInfos, func(i, j int) bool { return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id @@ -82,8 +95,22 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { return s } func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { - fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) + fmt.Fprintf(writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos)) + var s statistics + for _, diskInfo := range t.DiskInfos { + s = s.plus(writeDiskInfo(writer, diskInfo)) + } + fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s) + return s +} + +func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo) statistics { var s statistics + diskType := t.Type + if diskType == "" { + diskType = "hdd" + } + fmt.Fprintf(writer, " Disk %s(%s)\n", diskType, diskInfoToString(t)) sort.Slice(t.VolumeInfos, func(i, j int) bool { return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id }) @@ -91,13 +118,14 @@ func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { s = s.plus(writeVolumeInformationMessage(writer, vi)) } for _, ecShardInfo := range t.EcShardInfos { - fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds()) + fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds()) } - fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s) + fmt.Fprintf(writer, " Disk %s %+v \n", diskType, s) return s } + func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics { - fmt.Fprintf(writer, " volume %+v \n", t) + fmt.Fprintf(writer, " volume %+v \n", t) return newStatistics(t) } diff --git a/weed/shell/command_volume_list_test.go b/weed/shell/command_volume_list_test.go new file mode 100644 index 000000000..72c76f242 --- /dev/null +++ b/weed/shell/command_volume_list_test.go @@ -0,0 +1,893 @@ +package shell + +import ( + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "strconv" + "strings" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +func TestParsing(t *testing.T) { + topo := parseOutput(topoData) + + assert.Equal(t, 5, len(topo.DataCenterInfos)) + +} + +func parseOutput(output string) *master_pb.TopologyInfo { + lines := strings.Split(output, "\n") + var topo *master_pb.TopologyInfo + var dc *master_pb.DataCenterInfo + var rack *master_pb.RackInfo + var dn *master_pb.DataNodeInfo + var disk *master_pb.DiskInfo + for _, line := range lines { + line = strings.TrimSpace(line) + parts := strings.Split(line, " ") + switch 
diff --git a/weed/shell/command_volume_list_test.go b/weed/shell/command_volume_list_test.go
new file mode 100644
index 000000000..72c76f242
--- /dev/null
+++ b/weed/shell/command_volume_list_test.go
@@ -0,0 +1,893 @@
+package shell
+
+import (
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/golang/protobuf/proto"
+	"github.com/stretchr/testify/assert"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+func TestParsing(t *testing.T) {
+	topo := parseOutput(topoData)
+
+	assert.Equal(t, 5, len(topo.DataCenterInfos))
+}
+
+func parseOutput(output string) *master_pb.TopologyInfo {
+	lines := strings.Split(output, "\n")
+	var topo *master_pb.TopologyInfo
+	var dc *master_pb.DataCenterInfo
+	var rack *master_pb.RackInfo
+	var dn *master_pb.DataNodeInfo
+	var disk *master_pb.DiskInfo
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		parts := strings.Split(line, " ")
+		switch parts[0] {
+		case "Topology":
+			if topo == nil {
+				topo = &master_pb.TopologyInfo{}
+			}
+		case "DataCenter":
+			if dc == nil {
+				dc = &master_pb.DataCenterInfo{
+					Id: parts[1],
+				}
+				topo.DataCenterInfos = append(topo.DataCenterInfos, dc)
+			} else {
+				dc = nil
+			}
+		case "Rack":
+			if rack == nil {
+				rack = &master_pb.RackInfo{
+					Id: parts[1],
+				}
+				dc.RackInfos = append(dc.RackInfos, rack)
+			} else {
+				rack = nil
+			}
+		case "DataNode":
+			if dn == nil {
+				dn = &master_pb.DataNodeInfo{
+					Id:        parts[1],
+					DiskInfos: make(map[string]*master_pb.DiskInfo),
+				}
+				rack.DataNodeInfos = append(rack.DataNodeInfos, dn)
+			} else {
+				dn = nil
+			}
+		case "Disk":
+			if disk == nil {
+				diskType := parts[1][:strings.Index(parts[1], "(")]
+				maxVolumeCountStr := parts[1][strings.Index(parts[1], "/")+1:]
+				maxVolumeCount, _ := strconv.Atoi(maxVolumeCountStr)
+				disk = &master_pb.DiskInfo{
+					Type:           diskType,
+					MaxVolumeCount: uint64(maxVolumeCount),
+				}
+				dn.DiskInfos[types.ToDiskType(diskType).String()] = disk
+			} else {
+				disk = nil
+			}
+		case "volume":
+			volumeLine := line[len("volume "):]
+			volume := &master_pb.VolumeInformationMessage{}
+			proto.UnmarshalText(volumeLine, volume)
+			disk.VolumeInfos = append(disk.VolumeInfos, volume)
+		}
+	}
+
+	return topo
+}
+
+const topoData = `
+Topology volumeSizeLimit:1024 MB hdd(volume:760/7280 active:760 free:6520 remote:0)
+  DataCenter dc1 hdd(volume:0/0 active:0 free:0 remote:0)
+    Rack DefaultRack hdd(volume:0/0 active:0 free:0 remote:0)
+    Rack DefaultRack total size:0 file_count:0
+  DataCenter dc1 total size:0 file_count:0
+  DataCenter dc2 hdd(volume:86/430 active:86 free:344 remote:0)
+    Rack rack1 hdd(volume:50/240 active:50 free:190 remote:0)
+      DataNode 192.168.1.4:8080 hdd(volume:50/240 active:50 free:190 remote:0)
+        Disk hdd(volume:50/240 active:50 free:190 remote:0)
+          volume id:15 size:1115965064 collection:"collection0" file_count:83 replica_placement:100 version:3 modified_at_second:1609923671
+          volume id:21 size:1097631536 collection:"collection0" file_count:82 delete_count:7 deleted_byte_count:68975485 replica_placement:100 version:3 modified_at_second:1609929578
+          volume id:22 size:1086828272 collection:"collection0" file_count:75 replica_placement:100 version:3 modified_at_second:1609930001
+          volume id:23 size:1076380216 collection:"collection0" file_count:68 replica_placement:100 version:3 modified_at_second:1609930434
+          volume id:24 size:1074139776 collection:"collection0" file_count:90 replica_placement:100 version:3 modified_at_second:1609930909
+          volume id:25 size:690757512 collection:"collection0" file_count:38 replica_placement:100 version:3 modified_at_second:1611144216
+          volume id:27 size:298886792 file_count:1608 replica_placement:100 version:3 modified_at_second:1615632482
+          volume id:28 size:308919192 file_count:1591 delete_count:1 deleted_byte_count:125280 replica_placement:100 version:3 modified_at_second:1615631762
+          volume id:29 size:281582680 file_count:1537 replica_placement:100 version:3 modified_at_second:1615629422
+          volume id:30 size:289466144 file_count:1566 delete_count:1 deleted_byte_count:124972 replica_placement:100 version:3 modified_at_second:1615632422
+          volume id:31 size:273363256 file_count:1498 replica_placement:100 version:3 modified_at_second:1615631642
+          volume id:33 size:1130226400 collection:"collection1" file_count:7322 delete_count:172 deleted_byte_count:45199399 replica_placement:100 version:3 modified_at_second:1615618789
+          volume id:38 size:1075545744 collection:"collection1" file_count:13324 delete_count:100
deleted_byte_count:25223906 replica_placement:100 version:3 modified_at_second:1615569830 + volume id:51 size:1076796120 collection:"collection1" file_count:10550 delete_count:39 deleted_byte_count:12723654 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615547786 + volume id:52 size:1083529728 collection:"collection1" file_count:10128 delete_count:32 deleted_byte_count:10608391 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615599195 + volume id:54 size:1045022344 collection:"collection1" file_count:9408 delete_count:30 deleted_byte_count:15132106 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812 + volume id:63 size:956941112 collection:"collection1" file_count:8271 delete_count:32 deleted_byte_count:15876189 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632036 + volume id:69 size:869213648 collection:"collection1" file_count:7293 delete_count:102 deleted_byte_count:30643207 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630534 + volume id:74 size:957046128 collection:"collection1" file_count:6982 delete_count:258 deleted_byte_count:73054259 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631460 + volume id:80 size:827912928 collection:"collection1" file_count:6914 delete_count:17 deleted_byte_count:5689635 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631157 + volume id:84 size:873121856 collection:"collection1" file_count:8200 delete_count:13 deleted_byte_count:3131676 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631161 + volume id:85 size:1023869320 collection:"collection1" file_count:7788 delete_count:234 deleted_byte_count:78037967 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631723 + volume id:97 size:1053112992 collection:"collection1" file_count:6789 delete_count:50 deleted_byte_count:38894001 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631193 + volume id:98 size:1077836440 collection:"collection1" file_count:7605 delete_count:202 deleted_byte_count:73180379 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615523691 + volume id:105 size:1073996824 collection:"collection1" file_count:6872 delete_count:20 deleted_byte_count:14482293 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615499757 + volume id:106 size:1075458664 collection:"collection1" file_count:7182 delete_count:307 deleted_byte_count:69349053 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615598137 + volume id:112 size:1076392512 collection:"collection1" file_count:8291 delete_count:156 deleted_byte_count:74120183 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569823 + volume id:116 size:1074489504 collection:"collection1" file_count:9981 delete_count:174 deleted_byte_count:53998777 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615611565 + volume id:119 size:1075940104 collection:"collection1" file_count:9003 delete_count:12 deleted_byte_count:9128155 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573878 + volume id:128 size:1074874632 collection:"collection1" file_count:9821 delete_count:148 deleted_byte_count:43633334 replica_placement:100 version:3 modified_at_second:1615602670 + volume id:133 size:1075952760 collection:"collection1" file_count:9538 delete_count:74 deleted_byte_count:19558008 
replica_placement:100 version:3 modified_at_second:1615584779 + volume id:136 size:1074433552 collection:"collection1" file_count:9593 delete_count:72 deleted_byte_count:26912512 replica_placement:100 version:3 modified_at_second:1615376036 + volume id:138 size:1074465744 collection:"collection1" file_count:10120 delete_count:55 deleted_byte_count:15875438 replica_placement:100 version:3 modified_at_second:1615572231 + volume id:140 size:1076203744 collection:"collection1" file_count:11219 delete_count:57 deleted_byte_count:19864498 replica_placement:100 version:3 modified_at_second:1615571947 + volume id:144 size:1074549720 collection:"collection1" file_count:8780 delete_count:50 deleted_byte_count:52475146 replica_placement:100 version:3 modified_at_second:1615573451 + volume id:161 size:1077397192 collection:"collection1" file_count:9988 delete_count:28 deleted_byte_count:12509164 replica_placement:100 version:3 modified_at_second:1615631452 + volume id:173 size:1074154704 collection:"collection1" file_count:30884 delete_count:34 deleted_byte_count:2578509 replica_placement:100 version:3 modified_at_second:1615591904 + volume id:174 size:1073824232 collection:"collection1" file_count:30689 delete_count:36 deleted_byte_count:2160116 replica_placement:100 version:3 modified_at_second:1615598914 + volume id:197 size:1075423240 collection:"collection1" file_count:16473 delete_count:15 deleted_byte_count:12552442 replica_placement:100 version:3 modified_at_second:1615485254 + volume id:219 size:1092298904 collection:"collection1" file_count:3193 delete_count:17 deleted_byte_count:2047576 replica_placement:100 version:3 modified_at_second:1615579316 + volume id:263 size:1077167352 collection:"collection2" file_count:20227 delete_count:4 deleted_byte_count:97887 replica_placement:100 version:3 modified_at_second:1614871567 + volume id:272 size:1076146040 collection:"collection2" file_count:21034 delete_count:2 deleted_byte_count:216564 replica_placement:100 version:3 modified_at_second:1614884139 + volume id:291 size:1076256760 collection:"collection2" file_count:28301 delete_count:5 deleted_byte_count:116027 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:299 size:1075147824 collection:"collection2" file_count:22927 delete_count:4 deleted_byte_count:345569 replica_placement:100 version:3 modified_at_second:1614918454 + volume id:301 size:1074655600 collection:"collection2" file_count:22543 delete_count:6 deleted_byte_count:136968 replica_placement:100 version:3 modified_at_second:1614918378 + volume id:302 size:1077559792 collection:"collection2" file_count:23124 delete_count:7 deleted_byte_count:293111 replica_placement:100 version:3 modified_at_second:1614925500 + volume id:339 size:1078402392 collection:"collection2" file_count:22309 replica_placement:100 version:3 modified_at_second:1614969996 + volume id:345 size:1074560760 collection:"collection2" file_count:22117 delete_count:2 deleted_byte_count:373286 replica_placement:100 version:3 modified_at_second:1614977458 + volume id:355 size:1075239792 collection:"collection2" file_count:22244 delete_count:1 deleted_byte_count:23282 replica_placement:100 version:3 modified_at_second:1614992157 + volume id:373 size:1080928000 collection:"collection2" file_count:22617 delete_count:4 deleted_byte_count:91849 replica_placement:100 version:3 modified_at_second:1615016877 + Disk hdd total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253 + DataNode 192.168.1.4:8080 total size:48630015544 
file_count:537880 deleted_file:2580 deleted_bytes:929560253 + Rack rack1 total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253 + Rack rack2 hdd(volume:36/190 active:36 free:154 remote:0) + DataNode 192.168.1.2:8080 hdd(volume:36/190 active:36 free:154 remote:0) + Disk hdd(volume:36/190 active:36 free:154 remote:0) + volume id:2 size:289228560 file_count:1640 delete_count:4 deleted_byte_count:480564 replica_placement:100 version:3 compact_revision:6 modified_at_second:1615630622 + volume id:3 size:308743136 file_count:1638 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632242 + volume id:4 size:285986968 file_count:1641 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632302 + volume id:6 size:302411024 file_count:1604 delete_count:2 deleted_byte_count:274587 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631402 + volume id:7 size:1924728 collection:"collection4" file_count:15 replica_placement:100 version:3 modified_at_second:1609331040 + volume id:9 size:77337416 collection:"collection3" file_count:58 replica_placement:100 version:3 ttl:772 modified_at_second:1615513762 + volume id:10 size:1212784656 collection:"collection0" file_count:58 replica_placement:100 version:3 modified_at_second:1609814550 + volume id:12 size:1110923848 collection:"collection0" file_count:45 replica_placement:100 version:3 modified_at_second:1609819732 + volume id:13 size:1184910656 collection:"collection0" file_count:47 replica_placement:100 version:3 modified_at_second:1609827837 + volume id:14 size:1107475720 collection:"collection0" file_count:80 delete_count:3 deleted_byte_count:6870 replica_placement:100 version:3 modified_at_second:1612956980 + volume id:16 size:1113666104 collection:"collection0" file_count:73 delete_count:5 deleted_byte_count:6318 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:17 size:1095115800 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:7099 replica_placement:100 version:3 modified_at_second:1612957000 + volume id:21 size:1097631664 collection:"collection0" file_count:82 delete_count:11 deleted_byte_count:68985100 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:56 size:1001897616 collection:"collection1" file_count:8762 delete_count:37 deleted_byte_count:65375405 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632014 + volume id:81 size:880693104 collection:"collection1" file_count:7481 delete_count:236 deleted_byte_count:80386421 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631396 + volume id:104 size:1076383624 collection:"collection1" file_count:7663 delete_count:184 deleted_byte_count:100728071 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615602658 + volume id:107 size:1073811840 collection:"collection1" file_count:7436 delete_count:168 deleted_byte_count:57747484 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615293569 + volume id:113 size:1076709184 collection:"collection1" file_count:9355 delete_count:177 deleted_byte_count:59796765 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569822 + volume id:139 size:1074163936 collection:"collection1" file_count:9315 delete_count:42 deleted_byte_count:10630966 replica_placement:100 version:3 modified_at_second:1615571946 + volume id:151 size:1098659752 collection:"collection1" file_count:10808 delete_count:24 
deleted_byte_count:7088102 replica_placement:100 version:3 modified_at_second:1615586389 + volume id:155 size:1075140688 collection:"collection1" file_count:10882 delete_count:32 deleted_byte_count:9076141 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:167 size:1073958176 collection:"collection1" file_count:25229 delete_count:48 deleted_byte_count:25871565 replica_placement:100 version:3 modified_at_second:1615602669 + volume id:177 size:1074120216 collection:"collection1" file_count:22293 delete_count:16 deleted_byte_count:3803952 replica_placement:100 version:3 modified_at_second:1615516892 + volume id:179 size:1074313920 collection:"collection1" file_count:21829 delete_count:24 deleted_byte_count:45552859 replica_placement:100 version:3 modified_at_second:1615580308 + volume id:182 size:1076131280 collection:"collection1" file_count:31987 delete_count:21 deleted_byte_count:1452346 replica_placement:100 version:3 modified_at_second:1615568922 + volume id:215 size:1068268216 collection:"collection1" file_count:2813 delete_count:10 deleted_byte_count:5676795 replica_placement:100 version:3 modified_at_second:1615586386 + volume id:217 size:1075381872 collection:"collection1" file_count:3331 delete_count:14 deleted_byte_count:2009141 replica_placement:100 version:3 modified_at_second:1615401638 + volume id:283 size:1080178944 collection:"collection2" file_count:19462 delete_count:7 deleted_byte_count:660407 replica_placement:100 version:3 modified_at_second:1614896626 + volume id:303 size:1075944504 collection:"collection2" file_count:22541 delete_count:2 deleted_byte_count:13617 replica_placement:100 version:3 modified_at_second:1614925431 + volume id:309 size:1075178624 collection:"collection2" file_count:22692 delete_count:3 deleted_byte_count:171124 replica_placement:100 version:3 modified_at_second:1614931409 + volume id:323 size:1074608200 collection:"collection2" file_count:21605 delete_count:4 deleted_byte_count:172090 replica_placement:100 version:3 modified_at_second:1614950526 + volume id:344 size:1075035448 collection:"collection2" file_count:21765 delete_count:1 deleted_byte_count:24623 replica_placement:100 version:3 modified_at_second:1614977465 + volume id:347 size:1075145496 collection:"collection2" file_count:22178 delete_count:1 deleted_byte_count:79392 replica_placement:100 version:3 modified_at_second:1614984727 + volume id:357 size:1074276208 collection:"collection2" file_count:23137 delete_count:4 deleted_byte_count:188487 replica_placement:100 version:3 modified_at_second:1614998792 + volume id:380 size:1010760456 collection:"collection2" file_count:14921 delete_count:6 deleted_byte_count:65678 replica_placement:100 version:3 modified_at_second:1615632322 + volume id:381 size:939292792 collection:"collection2" file_count:14619 delete_count:2 deleted_byte_count:5119 replica_placement:100 version:3 modified_at_second:1615632324 + Disk hdd total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088 + DataNode 192.168.1.2:8080 total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088 + Rack rack2 total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088 + DataCenter dc2 total size:82098209920 file_count:907048 deleted_file:3671 deleted_bytes:1475897341 + DataCenter dc3 hdd(volume:108/1850 active:108 free:1742 remote:0) + Rack rack3 hdd(volume:108/1850 active:108 free:1742 remote:0) + DataNode 192.168.1.6:8080 hdd(volume:108/1850 active:108 free:1742 remote:0) + Disk 
hdd(volume:108/1850 active:108 free:1742 remote:0) + volume id:1 size:284685936 file_count:1557 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632062 + volume id:32 size:281390512 file_count:1496 delete_count:6 deleted_byte_count:546403 replica_placement:100 version:3 modified_at_second:1615632362 + volume id:47 size:444599784 collection:"collection1" file_count:709 delete_count:19 deleted_byte_count:11913451 replica_placement:100 version:3 modified_at_second:1615632397 + volume id:49 size:1078775288 collection:"collection1" file_count:9636 delete_count:22 deleted_byte_count:5625976 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630446 + volume id:68 size:898630584 collection:"collection1" file_count:6934 delete_count:95 deleted_byte_count:27460707 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284 + volume id:88 size:1073767976 collection:"collection1" file_count:14995 delete_count:206 deleted_byte_count:81222360 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615629897 + volume id:202 size:1077533160 collection:"collection1" file_count:2847 delete_count:67 deleted_byte_count:65172985 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615588497 + volume id:203 size:1027316272 collection:"collection1" file_count:3040 delete_count:11 deleted_byte_count:3993230 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631728 + volume id:205 size:1078485304 collection:"collection1" file_count:2869 delete_count:43 deleted_byte_count:18290259 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615579314 + volume id:206 size:1082045848 collection:"collection1" file_count:2979 delete_count:225 deleted_byte_count:88220074 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615564274 + volume id:209 size:1074083592 collection:"collection1" file_count:3238 delete_count:4 deleted_byte_count:1494244 replica_placement:100 version:3 modified_at_second:1615419954 + volume id:211 size:1080610712 collection:"collection1" file_count:3247 delete_count:7 deleted_byte_count:1891456 replica_placement:100 version:3 modified_at_second:1615269124 + volume id:212 size:1078293360 collection:"collection1" file_count:3106 delete_count:6 deleted_byte_count:2085755 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:213 size:1093587976 collection:"collection1" file_count:3681 delete_count:12 deleted_byte_count:3138791 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:214 size:1074486992 collection:"collection1" file_count:3217 delete_count:10 deleted_byte_count:6392871 replica_placement:100 version:3 modified_at_second:1615586383 + volume id:216 size:1080073496 collection:"collection1" file_count:3316 delete_count:4 deleted_byte_count:179819 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:222 size:1106623104 collection:"collection1" file_count:3273 delete_count:11 deleted_byte_count:2114627 replica_placement:100 version:3 modified_at_second:1615586243 + volume id:223 size:1075233064 collection:"collection1" file_count:2966 delete_count:9 deleted_byte_count:744001 replica_placement:100 version:3 modified_at_second:1615586244 + volume id:227 size:1106699896 collection:"collection1" file_count:2827 delete_count:20 deleted_byte_count:5496790 replica_placement:100 version:3 modified_at_second:1615609989 + volume id:229 size:1109855312 collection:"collection1" file_count:2857 
delete_count:22 deleted_byte_count:2839883 replica_placement:100 version:3 modified_at_second:1615609988 + volume id:230 size:1080722984 collection:"collection1" file_count:2898 delete_count:15 deleted_byte_count:3929261 replica_placement:100 version:3 modified_at_second:1615610537 + volume id:231 size:1112917696 collection:"collection1" file_count:3151 delete_count:20 deleted_byte_count:2989828 replica_placement:100 version:3 modified_at_second:1615611350 + volume id:233 size:1080526464 collection:"collection1" file_count:3136 delete_count:61 deleted_byte_count:17991717 replica_placement:100 version:3 modified_at_second:1615611352 + volume id:234 size:1073835280 collection:"collection1" file_count:2965 delete_count:41 deleted_byte_count:4960354 replica_placement:100 version:3 modified_at_second:1615611351 + volume id:235 size:1075586104 collection:"collection1" file_count:2767 delete_count:33 deleted_byte_count:3216540 replica_placement:100 version:3 modified_at_second:1615611354 + volume id:237 size:375722792 collection:"collection1" file_count:736 delete_count:16 deleted_byte_count:4464870 replica_placement:100 version:3 modified_at_second:1615631727 + volume id:239 size:426569024 collection:"collection1" file_count:693 delete_count:19 deleted_byte_count:13020783 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630838 + volume id:241 size:380217424 collection:"collection1" file_count:633 delete_count:6 deleted_byte_count:1715768 replica_placement:100 version:3 modified_at_second:1615632006 + volume id:244 size:1080295352 collection:"collection2" file_count:10812 delete_count:1 deleted_byte_count:795 replica_placement:100 version:3 modified_at_second:1614852171 + volume id:245 size:1074597056 collection:"collection2" file_count:10371 delete_count:3 deleted_byte_count:209701 replica_placement:100 version:3 modified_at_second:1614852093 + volume id:246 size:1075998648 collection:"collection2" file_count:10365 delete_count:1 deleted_byte_count:13112 replica_placement:100 version:3 modified_at_second:1614852105 + volume id:248 size:1084301184 collection:"collection2" file_count:11217 delete_count:4 deleted_byte_count:746488 replica_placement:100 version:3 modified_at_second:1614856285 + volume id:249 size:1074819136 collection:"collection2" file_count:10763 delete_count:2 deleted_byte_count:271699 replica_placement:100 version:3 modified_at_second:1614856230 + volume id:251 size:1075684488 collection:"collection2" file_count:10847 replica_placement:100 version:3 modified_at_second:1614856270 + volume id:252 size:1075065208 collection:"collection2" file_count:14622 delete_count:2 deleted_byte_count:5228 replica_placement:100 version:3 modified_at_second:1614861196 + volume id:253 size:1087328816 collection:"collection2" file_count:14920 delete_count:3 deleted_byte_count:522994 replica_placement:100 version:3 modified_at_second:1614861255 + volume id:255 size:1079581640 collection:"collection2" file_count:14877 delete_count:3 deleted_byte_count:101223 replica_placement:100 version:3 modified_at_second:1614861233 + volume id:256 size:1074283592 collection:"collection2" file_count:14157 delete_count:1 deleted_byte_count:18156 replica_placement:100 version:3 modified_at_second:1614861100 + volume id:258 size:1075527216 collection:"collection2" file_count:18421 delete_count:4 deleted_byte_count:267833 replica_placement:100 version:3 modified_at_second:1614866420 + volume id:259 size:1075507776 collection:"collection2" file_count:18079 delete_count:2 
deleted_byte_count:71992 replica_placement:100 version:3 modified_at_second:1614866381 + volume id:264 size:1081624192 collection:"collection2" file_count:21151 replica_placement:100 version:3 modified_at_second:1614871629 + volume id:265 size:1076401104 collection:"collection2" file_count:19932 delete_count:2 deleted_byte_count:160823 replica_placement:100 version:3 modified_at_second:1615629130 + volume id:266 size:1075617464 collection:"collection2" file_count:20075 delete_count:1 deleted_byte_count:1039 replica_placement:100 version:3 modified_at_second:1614871526 + volume id:267 size:1075699544 collection:"collection2" file_count:21039 delete_count:3 deleted_byte_count:59956 replica_placement:100 version:3 modified_at_second:1614877294 + volume id:268 size:1074490592 collection:"collection2" file_count:21698 delete_count:1 deleted_byte_count:33968 replica_placement:100 version:3 modified_at_second:1614877434 + volume id:269 size:1077552872 collection:"collection2" file_count:21875 delete_count:4 deleted_byte_count:347272 replica_placement:100 version:3 modified_at_second:1614877481 + volume id:270 size:1076876568 collection:"collection2" file_count:22057 delete_count:1 deleted_byte_count:43916 replica_placement:100 version:3 modified_at_second:1614877469 + volume id:275 size:1078349024 collection:"collection2" file_count:20808 delete_count:1 deleted_byte_count:1118 replica_placement:100 version:3 modified_at_second:1614884147 + volume id:277 size:1074956288 collection:"collection2" file_count:19260 delete_count:2 deleted_byte_count:172356 replica_placement:100 version:3 modified_at_second:1614889988 + volume id:278 size:1078798640 collection:"collection2" file_count:20597 delete_count:5 deleted_byte_count:400060 replica_placement:100 version:3 modified_at_second:1614890292 + volume id:279 size:1077325040 collection:"collection2" file_count:19671 delete_count:6 deleted_byte_count:379116 replica_placement:100 version:3 modified_at_second:1614890229 + volume id:280 size:1077432216 collection:"collection2" file_count:20286 delete_count:1 deleted_byte_count:879 replica_placement:100 version:3 modified_at_second:1614890262 + volume id:281 size:1077581096 collection:"collection2" file_count:20206 delete_count:3 deleted_byte_count:143964 replica_placement:100 version:3 modified_at_second:1614890237 + volume id:284 size:1074533384 collection:"collection2" file_count:22196 delete_count:4 deleted_byte_count:154683 replica_placement:100 version:3 modified_at_second:1614897231 + volume id:285 size:1082128688 collection:"collection2" file_count:21804 delete_count:1 deleted_byte_count:1064 replica_placement:100 version:3 modified_at_second:1614897165 + volume id:289 size:1075284256 collection:"collection2" file_count:29342 delete_count:5 deleted_byte_count:100454 replica_placement:100 version:3 modified_at_second:1614904977 + volume id:290 size:1074723792 collection:"collection2" file_count:28340 delete_count:4 deleted_byte_count:199064 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:291 size:1076256768 collection:"collection2" file_count:28301 delete_count:5 deleted_byte_count:116027 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:293 size:1075409792 collection:"collection2" file_count:26063 delete_count:4 deleted_byte_count:183834 replica_placement:100 version:3 modified_at_second:1614912235 + volume id:294 size:1075444048 collection:"collection2" file_count:26076 delete_count:4 deleted_byte_count:194914 replica_placement:100 version:3 
modified_at_second:1614912220 + volume id:296 size:1077824032 collection:"collection2" file_count:26741 delete_count:4 deleted_byte_count:199906 replica_placement:100 version:3 modified_at_second:1614912301 + volume id:297 size:1080229136 collection:"collection2" file_count:23409 delete_count:5 deleted_byte_count:46268 replica_placement:100 version:3 modified_at_second:1614918481 + volume id:298 size:1075410136 collection:"collection2" file_count:23222 delete_count:2 deleted_byte_count:46110 replica_placement:100 version:3 modified_at_second:1614918474 + volume id:299 size:1075147936 collection:"collection2" file_count:22927 delete_count:4 deleted_byte_count:345569 replica_placement:100 version:3 modified_at_second:1614918455 + volume id:300 size:1076212392 collection:"collection2" file_count:22892 delete_count:2 deleted_byte_count:61320 replica_placement:100 version:3 modified_at_second:1614918464 + volume id:301 size:1074655600 collection:"collection2" file_count:22543 delete_count:6 deleted_byte_count:136968 replica_placement:100 version:3 modified_at_second:1614918378 + volume id:303 size:1075944480 collection:"collection2" file_count:22541 delete_count:2 deleted_byte_count:13617 replica_placement:100 version:3 modified_at_second:1614925431 + volume id:306 size:1074764016 collection:"collection2" file_count:22939 replica_placement:100 version:3 modified_at_second:1614925462 + volume id:307 size:1076568000 collection:"collection2" file_count:23377 delete_count:2 deleted_byte_count:25453 replica_placement:100 version:3 modified_at_second:1614931448 + volume id:308 size:1074022392 collection:"collection2" file_count:23086 delete_count:2 deleted_byte_count:2127 replica_placement:100 version:3 modified_at_second:1614931401 + volume id:309 size:1075178664 collection:"collection2" file_count:22692 delete_count:3 deleted_byte_count:171124 replica_placement:100 version:3 modified_at_second:1614931409 + volume id:310 size:1074761528 collection:"collection2" file_count:21441 delete_count:3 deleted_byte_count:13934 replica_placement:100 version:3 modified_at_second:1614931077 + volume id:314 size:1074670840 collection:"collection2" file_count:20964 delete_count:4 deleted_byte_count:304291 replica_placement:100 version:3 modified_at_second:1614937441 + volume id:315 size:1084153544 collection:"collection2" file_count:23638 delete_count:2 deleted_byte_count:53956 replica_placement:100 version:3 modified_at_second:1614937885 + volume id:317 size:1076215096 collection:"collection2" file_count:23572 delete_count:2 deleted_byte_count:1441356 replica_placement:100 version:3 modified_at_second:1614943965 + volume id:318 size:1075965168 collection:"collection2" file_count:22459 delete_count:2 deleted_byte_count:37778 replica_placement:100 version:3 modified_at_second:1614943862 + volume id:319 size:1073952880 collection:"collection2" file_count:22286 delete_count:2 deleted_byte_count:43421 replica_placement:100 version:3 modified_at_second:1614943810 + volume id:320 size:1082437792 collection:"collection2" file_count:21544 delete_count:3 deleted_byte_count:16712 replica_placement:100 version:3 modified_at_second:1614943599 + volume id:321 size:1081477904 collection:"collection2" file_count:23531 delete_count:5 deleted_byte_count:262564 replica_placement:100 version:3 modified_at_second:1614943982 + volume id:324 size:1075606680 collection:"collection2" file_count:20799 delete_count:1 deleted_byte_count:251210 replica_placement:100 version:3 modified_at_second:1614950310 + volume id:325 size:1080701144 
collection:"collection2" file_count:21735 replica_placement:100 version:3 modified_at_second:1614950525 + volume id:330 size:1080825832 collection:"collection2" file_count:22464 delete_count:2 deleted_byte_count:15771 replica_placement:100 version:3 modified_at_second:1614956477 + volume id:332 size:1075569928 collection:"collection2" file_count:22097 delete_count:3 deleted_byte_count:98273 replica_placement:100 version:3 modified_at_second:1614962869 + volume id:334 size:1075607880 collection:"collection2" file_count:22546 delete_count:6 deleted_byte_count:101538 replica_placement:100 version:3 modified_at_second:1614962978 + volume id:336 size:1087853056 collection:"collection2" file_count:22801 delete_count:2 deleted_byte_count:26394 replica_placement:100 version:3 modified_at_second:1614963005 + volume id:337 size:1075646784 collection:"collection2" file_count:21934 delete_count:1 deleted_byte_count:3397 replica_placement:100 version:3 modified_at_second:1614969937 + volume id:338 size:1076118304 collection:"collection2" file_count:21680 replica_placement:100 version:3 modified_at_second:1614969850 + volume id:340 size:1079462184 collection:"collection2" file_count:22319 delete_count:4 deleted_byte_count:93620 replica_placement:100 version:3 modified_at_second:1614969978 + volume id:341 size:1074448400 collection:"collection2" file_count:21590 delete_count:5 deleted_byte_count:160085 replica_placement:100 version:3 modified_at_second:1614969858 + volume id:342 size:1080186424 collection:"collection2" file_count:22405 delete_count:4 deleted_byte_count:64819 replica_placement:100 version:3 modified_at_second:1614977521 + volume id:344 size:1075035416 collection:"collection2" file_count:21765 delete_count:1 deleted_byte_count:24623 replica_placement:100 version:3 modified_at_second:1614977465 + volume id:345 size:1074560760 collection:"collection2" file_count:22117 delete_count:2 deleted_byte_count:373286 replica_placement:100 version:3 modified_at_second:1614977457 + volume id:346 size:1076464112 collection:"collection2" file_count:22320 delete_count:4 deleted_byte_count:798258 replica_placement:100 version:3 modified_at_second:1615631322 + volume id:348 size:1080623640 collection:"collection2" file_count:21667 delete_count:1 deleted_byte_count:2443 replica_placement:100 version:3 modified_at_second:1614984606 + volume id:350 size:1074756688 collection:"collection2" file_count:21990 delete_count:3 deleted_byte_count:233881 replica_placement:100 version:3 modified_at_second:1614984682 + volume id:351 size:1078795112 collection:"collection2" file_count:23660 delete_count:3 deleted_byte_count:102141 replica_placement:100 version:3 modified_at_second:1614984816 + volume id:352 size:1077145936 collection:"collection2" file_count:22066 delete_count:1 deleted_byte_count:1018 replica_placement:100 version:3 modified_at_second:1614992130 + volume id:353 size:1074897496 collection:"collection2" file_count:21266 delete_count:2 deleted_byte_count:3105374 replica_placement:100 version:3 modified_at_second:1614991951 + volume id:355 size:1075239728 collection:"collection2" file_count:22244 delete_count:1 deleted_byte_count:23282 replica_placement:100 version:3 modified_at_second:1614992157 + volume id:356 size:1083305048 collection:"collection2" file_count:21552 delete_count:4 deleted_byte_count:14472 replica_placement:100 version:3 modified_at_second:1614992028 + volume id:358 size:1085152368 collection:"collection2" file_count:23756 delete_count:3 deleted_byte_count:44531 replica_placement:100 
version:3 modified_at_second:1614998824 + volume id:360 size:1075532456 collection:"collection2" file_count:22574 delete_count:3 deleted_byte_count:1774776 replica_placement:100 version:3 modified_at_second:1614998770 + volume id:361 size:1075362744 collection:"collection2" file_count:22272 delete_count:1 deleted_byte_count:3497 replica_placement:100 version:3 modified_at_second:1614998668 + volume id:375 size:1076140568 collection:"collection2" file_count:21880 delete_count:2 deleted_byte_count:51103 replica_placement:100 version:3 modified_at_second:1615016787 + volume id:376 size:1074845944 collection:"collection2" file_count:22908 delete_count:4 deleted_byte_count:432305 replica_placement:100 version:3 modified_at_second:1615016916 + volume id:377 size:957284144 collection:"collection2" file_count:14923 delete_count:1 deleted_byte_count:1797 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:378 size:959273936 collection:"collection2" file_count:15027 delete_count:4 deleted_byte_count:231414 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:381 size:939261032 collection:"collection2" file_count:14615 delete_count:5 deleted_byte_count:1192272 replica_placement:100 version:3 modified_at_second:1615632324 + Disk hdd total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + DataNode 192.168.1.6:8080 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + Rack rack3 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + DataCenter dc3 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + DataCenter dc4 hdd(volume:267/2000 active:267 free:1733 remote:0) + Rack DefaultRack hdd(volume:267/2000 active:267 free:1733 remote:0) + DataNode 192.168.1.1:8080 hdd(volume:267/2000 active:267 free:1733 remote:0) + Disk hdd(volume:267/2000 active:267 free:1733 remote:0) + volume id:1 size:284693256 file_count:1558 delete_count:2 deleted_byte_count:4818 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632062 + volume id:2 size:289228560 file_count:1640 delete_count:4 deleted_byte_count:464508 replica_placement:100 version:3 compact_revision:6 modified_at_second:1615630622 + volume id:3 size:308741952 file_count:1637 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632242 + volume id:4 size:285986968 file_count:1640 delete_count:1 deleted_byte_count:145095 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632302 + volume id:5 size:293806008 file_count:1669 delete_count:2 deleted_byte_count:274334 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631342 + volume id:6 size:302411024 file_count:1604 delete_count:2 deleted_byte_count:274587 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631402 + volume id:7 size:1924728 collection:"collection4" file_count:15 replica_placement:100 version:3 modified_at_second:1609331040 + volume id:9 size:77337416 collection:"collection3" file_count:58 replica_placement:100 version:3 ttl:772 modified_at_second:1615513762 + volume id:10 size:1212784656 collection:"collection0" file_count:58 replica_placement:100 version:3 modified_at_second:1609814543 + volume id:11 size:1109224552 collection:"collection0" file_count:44 replica_placement:100 version:3 modified_at_second:1609815123 + volume id:12 size:1110923848 collection:"collection0" file_count:45 replica_placement:100 version:3 
modified_at_second:1609819726 + volume id:13 size:1184910656 collection:"collection0" file_count:47 replica_placement:100 version:3 modified_at_second:1609827832 + volume id:14 size:1107475720 collection:"collection0" file_count:80 delete_count:3 deleted_byte_count:6870 replica_placement:100 version:3 modified_at_second:1612956983 + volume id:15 size:1115965160 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:4956 replica_placement:100 version:3 modified_at_second:1612957001 + volume id:16 size:1113666048 collection:"collection0" file_count:73 delete_count:5 deleted_byte_count:6318 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:17 size:1095115800 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:7099 replica_placement:100 version:3 modified_at_second:1612957000 + volume id:18 size:1096678688 collection:"collection0" file_count:88 delete_count:4 deleted_byte_count:8633 replica_placement:100 version:3 modified_at_second:1612957000 + volume id:19 size:1096923792 collection:"collection0" file_count:100 delete_count:10 deleted_byte_count:75779917 replica_placement:100 version:3 compact_revision:4 modified_at_second:1612957011 + volume id:20 size:1074760432 collection:"collection0" file_count:82 delete_count:5 deleted_byte_count:12156 replica_placement:100 version:3 compact_revision:2 modified_at_second:1612957011 + volume id:22 size:1086828368 collection:"collection0" file_count:75 delete_count:3 deleted_byte_count:5551 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:23 size:1076380280 collection:"collection0" file_count:68 delete_count:2 deleted_byte_count:2910 replica_placement:100 version:3 modified_at_second:1612957011 + volume id:24 size:1074139808 collection:"collection0" file_count:90 delete_count:1 deleted_byte_count:1977 replica_placement:100 version:3 modified_at_second:1612957011 + volume id:25 size:690757544 collection:"collection0" file_count:38 delete_count:1 deleted_byte_count:1944 replica_placement:100 version:3 modified_at_second:1612956995 + volume id:26 size:532657632 collection:"collection0" file_count:100 delete_count:4 deleted_byte_count:9081 replica_placement:100 version:3 modified_at_second:1614170023 + volume id:34 size:1077111136 collection:"collection1" file_count:9781 delete_count:110 deleted_byte_count:20894827 replica_placement:100 version:3 modified_at_second:1615619366 + volume id:35 size:1075241656 collection:"collection1" file_count:10523 delete_count:96 deleted_byte_count:46618989 replica_placement:100 version:3 modified_at_second:1615618790 + volume id:36 size:1075118360 collection:"collection1" file_count:10342 delete_count:116 deleted_byte_count:25493106 replica_placement:100 version:3 modified_at_second:1615606148 + volume id:37 size:1075895584 collection:"collection1" file_count:12013 delete_count:98 deleted_byte_count:50747932 replica_placement:100 version:3 modified_at_second:1615594777 + volume id:39 size:1076606536 collection:"collection1" file_count:12612 delete_count:78 deleted_byte_count:17462730 replica_placement:100 version:3 modified_at_second:1615611959 + volume id:40 size:1075358552 collection:"collection1" file_count:12597 delete_count:62 deleted_byte_count:11657901 replica_placement:100 version:3 modified_at_second:1615612994 + volume id:41 size:1076283528 collection:"collection1" file_count:12088 delete_count:84 deleted_byte_count:19319268 replica_placement:100 version:3 modified_at_second:1615596736 + volume id:42 size:1093948352 
collection:"collection1" file_count:7889 delete_count:47 deleted_byte_count:5697275 replica_placement:100 version:3 modified_at_second:1615548908 + volume id:43 size:1116445864 collection:"collection1" file_count:7358 delete_count:54 deleted_byte_count:9534379 replica_placement:100 version:3 modified_at_second:1615566170 + volume id:44 size:1077582560 collection:"collection1" file_count:7295 delete_count:50 deleted_byte_count:12618414 replica_placement:100 version:3 modified_at_second:1615566170 + volume id:45 size:1075254640 collection:"collection1" file_count:10772 delete_count:76 deleted_byte_count:22426345 replica_placement:100 version:3 modified_at_second:1615573499 + volume id:46 size:1075286056 collection:"collection1" file_count:9947 delete_count:309 deleted_byte_count:105601163 replica_placement:100 version:3 modified_at_second:1615569826 + volume id:48 size:1076778720 collection:"collection1" file_count:9850 delete_count:77 deleted_byte_count:16641907 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630690 + volume id:50 size:1076688224 collection:"collection1" file_count:7921 delete_count:26 deleted_byte_count:5162032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615610879 + volume id:52 size:1083529704 collection:"collection1" file_count:10128 delete_count:32 deleted_byte_count:10608391 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615599195 + volume id:53 size:1063089216 collection:"collection1" file_count:9832 delete_count:31 deleted_byte_count:9273066 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156 + volume id:55 size:1012890016 collection:"collection1" file_count:8651 delete_count:27 deleted_byte_count:9418841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631452 + volume id:57 size:839849792 collection:"collection1" file_count:7514 delete_count:24 deleted_byte_count:6228543 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631774 + volume id:58 size:908064200 collection:"collection1" file_count:8128 delete_count:21 deleted_byte_count:6113731 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632342 + volume id:59 size:988302272 collection:"collection1" file_count:8098 delete_count:20 deleted_byte_count:3947615 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632238 + volume id:60 size:1010702480 collection:"collection1" file_count:8969 delete_count:79 deleted_byte_count:24782814 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632439 + volume id:61 size:975604488 collection:"collection1" file_count:8683 delete_count:20 deleted_byte_count:10276072 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631176 + volume id:62 size:873845936 collection:"collection1" file_count:7897 delete_count:23 deleted_byte_count:10920170 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631133 + volume id:64 size:965638488 collection:"collection1" file_count:8218 delete_count:27 deleted_byte_count:6922489 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631031 + volume id:65 size:823283552 collection:"collection1" file_count:7834 delete_count:29 deleted_byte_count:5950610 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632306 + volume id:66 size:821343440 collection:"collection1" file_count:7383 delete_count:29 deleted_byte_count:12010343 replica_placement:100 version:3 
compact_revision:2 modified_at_second:1615631968 + volume id:67 size:878713872 collection:"collection1" file_count:7299 delete_count:117 deleted_byte_count:24857326 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156 + volume id:68 size:898630584 collection:"collection1" file_count:6934 delete_count:95 deleted_byte_count:27460707 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284 + volume id:70 size:886695472 collection:"collection1" file_count:7769 delete_count:164 deleted_byte_count:45162513 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632398 + volume id:71 size:907608392 collection:"collection1" file_count:7658 delete_count:122 deleted_byte_count:27622941 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307 + volume id:72 size:903990720 collection:"collection1" file_count:6996 delete_count:240 deleted_byte_count:74147727 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630982 + volume id:73 size:929047664 collection:"collection1" file_count:7038 delete_count:227 deleted_byte_count:65336664 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630707 + volume id:74 size:957046128 collection:"collection1" file_count:6981 delete_count:259 deleted_byte_count:73080838 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631460 + volume id:75 size:908044992 collection:"collection1" file_count:6911 delete_count:268 deleted_byte_count:73934373 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632430 + volume id:76 size:985296744 collection:"collection1" file_count:6566 delete_count:61 deleted_byte_count:44464430 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284 + volume id:77 size:929398296 collection:"collection1" file_count:7427 delete_count:238 deleted_byte_count:59581579 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632013 + volume id:78 size:1075671512 collection:"collection1" file_count:7540 delete_count:258 deleted_byte_count:71726846 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615582829 + volume id:79 size:948225472 collection:"collection1" file_count:6997 delete_count:227 deleted_byte_count:60625763 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631326 + volume id:82 size:1041661800 collection:"collection1" file_count:7043 delete_count:207 deleted_byte_count:52275724 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632430 + volume id:83 size:936195856 collection:"collection1" file_count:7593 delete_count:13 deleted_byte_count:4633917 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632029 + volume id:85 size:1023867520 collection:"collection1" file_count:7787 delete_count:240 deleted_byte_count:82091742 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631723 + volume id:86 size:1009437488 collection:"collection1" file_count:8474 delete_count:236 deleted_byte_count:64543674 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812 + volume id:87 size:922276640 collection:"collection1" file_count:12902 delete_count:13 deleted_byte_count:3412959 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632438 + volume id:89 size:1044401976 collection:"collection1" file_count:14943 delete_count:243 deleted_byte_count:58543159 replica_placement:100 version:3 
compact_revision:2 modified_at_second:1615632208 + volume id:90 size:891145784 collection:"collection1" file_count:14608 delete_count:10 deleted_byte_count:2564369 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615629390 + volume id:91 size:936572832 collection:"collection1" file_count:14686 delete_count:11 deleted_byte_count:4717727 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631851 + volume id:92 size:992440712 collection:"collection1" file_count:7061 delete_count:195 deleted_byte_count:60649573 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630566 + volume id:93 size:1079603768 collection:"collection1" file_count:7878 delete_count:270 deleted_byte_count:74150048 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615556015 + volume id:94 size:1030685824 collection:"collection1" file_count:7660 delete_count:207 deleted_byte_count:70150733 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631616 + volume id:95 size:990879168 collection:"collection1" file_count:6620 delete_count:206 deleted_byte_count:60363604 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631866 + volume id:96 size:989296136 collection:"collection1" file_count:7544 delete_count:229 deleted_byte_count:59931853 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630778 + volume id:97 size:1053112992 collection:"collection1" file_count:6789 delete_count:50 deleted_byte_count:38894001 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631194 + volume id:99 size:1071718504 collection:"collection1" file_count:7470 delete_count:8 deleted_byte_count:9624950 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631175 + volume id:100 size:1083617440 collection:"collection1" file_count:7018 delete_count:187 deleted_byte_count:61304236 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615505917 + volume id:101 size:1077109520 collection:"collection1" file_count:7706 delete_count:226 deleted_byte_count:77864841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630994 + volume id:102 size:1074359920 collection:"collection1" file_count:7338 delete_count:7 deleted_byte_count:6499151 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615626683 + volume id:103 size:1075863904 collection:"collection1" file_count:7184 delete_count:186 deleted_byte_count:58872238 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615628417 + volume id:104 size:1076383768 collection:"collection1" file_count:7663 delete_count:184 deleted_byte_count:100578087 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615602661 + volume id:105 size:1073996824 collection:"collection1" file_count:6873 delete_count:19 deleted_byte_count:14271533 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615499756 + volume id:108 size:1074648024 collection:"collection1" file_count:7472 delete_count:194 deleted_byte_count:70864699 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593232 + volume id:109 size:1075254560 collection:"collection1" file_count:7556 delete_count:263 deleted_byte_count:55155265 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615502487 + volume id:110 size:1076575744 collection:"collection1" file_count:6996 delete_count:163 deleted_byte_count:52954032 replica_placement:100 
version:3 compact_revision:1 modified_at_second:1615590786
+ volume id:111 size:1073826232 collection:"collection1" file_count:7355 delete_count:155 deleted_byte_count:50083578 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593233
+ volume id:114 size:1074762784 collection:"collection1" file_count:8802 delete_count:156 deleted_byte_count:38470055 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615591826
+ volume id:115 size:1076192240 collection:"collection1" file_count:7690 delete_count:154 deleted_byte_count:32267193 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615285295
+ volume id:116 size:1074489504 collection:"collection1" file_count:9981 delete_count:174 deleted_byte_count:53998777 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615611567
+ volume id:117 size:1073917192 collection:"collection1" file_count:9520 delete_count:114 deleted_byte_count:21835126 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573714
+ volume id:118 size:1074064400 collection:"collection1" file_count:8738 delete_count:15 deleted_byte_count:3460697 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615516265
+ volume id:119 size:1075940104 collection:"collection1" file_count:9003 delete_count:12 deleted_byte_count:9128155 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573880
+ volume id:120 size:1076115928 collection:"collection1" file_count:9639 delete_count:118 deleted_byte_count:33357871 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615482567
+ volume id:121 size:1078803248 collection:"collection1" file_count:10113 delete_count:441 deleted_byte_count:94128627 replica_placement:100 version:3 modified_at_second:1615506629
+ volume id:122 size:1076235312 collection:"collection1" file_count:9106 delete_count:252 deleted_byte_count:93041272 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:123 size:1080491112 collection:"collection1" file_count:10623 delete_count:302 deleted_byte_count:83956998 replica_placement:100 version:3 modified_at_second:1615585916
+ volume id:124 size:1074519360 collection:"collection1" file_count:9457 delete_count:286 deleted_byte_count:74752459 replica_placement:100 version:3 modified_at_second:1615585916
+ volume id:125 size:1088687040 collection:"collection1" file_count:9518 delete_count:281 deleted_byte_count:76037905 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:126 size:1073867464 collection:"collection1" file_count:9320 delete_count:278 deleted_byte_count:94547424 replica_placement:100 version:3 modified_at_second:1615585912
+ volume id:127 size:1074907336 collection:"collection1" file_count:9900 delete_count:133 deleted_byte_count:48570820 replica_placement:100 version:3 modified_at_second:1615612991
+ volume id:129 size:1074704272 collection:"collection1" file_count:10012 delete_count:150 deleted_byte_count:64491721 replica_placement:100 version:3 modified_at_second:1615627566
+ volume id:130 size:1075000632 collection:"collection1" file_count:10633 delete_count:161 deleted_byte_count:34768201 replica_placement:100 version:3 modified_at_second:1615582330
+ volume id:131 size:1075279584 collection:"collection1" file_count:10075 delete_count:135 deleted_byte_count:29795712 replica_placement:100 version:3 modified_at_second:1615523898
+ volume id:132 size:1088539496 collection:"collection1" file_count:11051 delete_count:71 deleted_byte_count:17178322 replica_placement:100 version:3 modified_at_second:1615619584
+ volume id:133 size:1075952760 collection:"collection1" file_count:9538 delete_count:74 deleted_byte_count:19558008 replica_placement:100 version:3 modified_at_second:1615584780
+ volume id:134 size:1074367304 collection:"collection1" file_count:10662 delete_count:69 deleted_byte_count:25530139 replica_placement:100 version:3 modified_at_second:1615555876
+ volume id:135 size:1073906720 collection:"collection1" file_count:10446 delete_count:71 deleted_byte_count:28599975 replica_placement:100 version:3 modified_at_second:1615569816
+ volume id:137 size:1074309264 collection:"collection1" file_count:9633 delete_count:50 deleted_byte_count:27487972 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:139 size:1074163936 collection:"collection1" file_count:9314 delete_count:43 deleted_byte_count:10631353 replica_placement:100 version:3 modified_at_second:1615571946
+ volume id:141 size:1074619488 collection:"collection1" file_count:9840 delete_count:45 deleted_byte_count:40890181 replica_placement:100 version:3 modified_at_second:1615630994
+ volume id:142 size:1075732992 collection:"collection1" file_count:9009 delete_count:48 deleted_byte_count:9912854 replica_placement:100 version:3 modified_at_second:1615598914
+ volume id:143 size:1075011280 collection:"collection1" file_count:9608 delete_count:51 deleted_byte_count:37282460 replica_placement:100 version:3 modified_at_second:1615488586
+ volume id:145 size:1074394928 collection:"collection1" file_count:9255 delete_count:34 deleted_byte_count:38011392 replica_placement:100 version:3 modified_at_second:1615591825
+ volume id:146 size:1076337520 collection:"collection1" file_count:10492 delete_count:50 deleted_byte_count:17071505 replica_placement:100 version:3 modified_at_second:1615632005
+ volume id:147 size:1077130544 collection:"collection1" file_count:10451 delete_count:27 deleted_byte_count:8290907 replica_placement:100 version:3 modified_at_second:1615604117
+ volume id:148 size:1076066568 collection:"collection1" file_count:9547 delete_count:33 deleted_byte_count:7034089 replica_placement:100 version:3 modified_at_second:1615586393
+ volume id:149 size:1074989016 collection:"collection1" file_count:8352 delete_count:35 deleted_byte_count:7179742 replica_placement:100 version:3 modified_at_second:1615494496
+ volume id:150 size:1076290408 collection:"collection1" file_count:9328 delete_count:33 deleted_byte_count:43417791 replica_placement:100 version:3 modified_at_second:1615611569
+ volume id:151 size:1098659752 collection:"collection1" file_count:10805 delete_count:27 deleted_byte_count:7209106 replica_placement:100 version:3 modified_at_second:1615586390
+ volume id:152 size:1075941376 collection:"collection1" file_count:9951 delete_count:36 deleted_byte_count:25348335 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:153 size:1078539784 collection:"collection1" file_count:10924 delete_count:34 deleted_byte_count:12603081 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:154 size:1081244752 collection:"collection1" file_count:11002 delete_count:31 deleted_byte_count:8467560 replica_placement:100 version:3 modified_at_second:1615478471
+ volume id:156 size:1074975832 collection:"collection1" file_count:9535 delete_count:40 deleted_byte_count:11426621 replica_placement:100 version:3 modified_at_second:1615628342
+ volume id:157 size:1076758536 collection:"collection1" file_count:10012 delete_count:19 deleted_byte_count:11688737 replica_placement:100 version:3 modified_at_second:1615597782
+ volume id:158 size:1087251976 collection:"collection1" file_count:9972 delete_count:20 deleted_byte_count:10328429 replica_placement:100 version:3 modified_at_second:1615588879
+ volume id:159 size:1074132336 collection:"collection1" file_count:9382 delete_count:27 deleted_byte_count:11474574 replica_placement:100 version:3 modified_at_second:1615593593
+ volume id:160 size:1075680976 collection:"collection1" file_count:9772 delete_count:22 deleted_byte_count:4981968 replica_placement:100 version:3 modified_at_second:1615597782
+ volume id:161 size:1077397136 collection:"collection1" file_count:9988 delete_count:28 deleted_byte_count:12509164 replica_placement:100 version:3 modified_at_second:1615631452
+ volume id:162 size:1074286880 collection:"collection1" file_count:11220 delete_count:17 deleted_byte_count:1815547 replica_placement:100 version:3 modified_at_second:1615478127
+ volume id:163 size:1074457224 collection:"collection1" file_count:12524 delete_count:27 deleted_byte_count:6359619 replica_placement:100 version:3 modified_at_second:1615579313
+ volume id:164 size:1074261256 collection:"collection1" file_count:11922 delete_count:25 deleted_byte_count:2923288 replica_placement:100 version:3 modified_at_second:1615620085
+ volume id:165 size:1073891080 collection:"collection1" file_count:9152 delete_count:12 deleted_byte_count:19164659 replica_placement:100 version:3 modified_at_second:1615471910
+ volume id:166 size:1075637536 collection:"collection1" file_count:14211 delete_count:24 deleted_byte_count:20415490 replica_placement:100 version:3 modified_at_second:1615491021
+ volume id:167 size:1073958280 collection:"collection1" file_count:25231 delete_count:48 deleted_byte_count:26022344 replica_placement:100 version:3 modified_at_second:1615620014
+ volume id:168 size:1074718864 collection:"collection1" file_count:25702 delete_count:40 deleted_byte_count:4024775 replica_placement:100 version:3 modified_at_second:1615585664
+ volume id:169 size:1073863032 collection:"collection1" file_count:25248 delete_count:43 deleted_byte_count:3013817 replica_placement:100 version:3 modified_at_second:1615569832
+ volume id:170 size:1075747088 collection:"collection1" file_count:24596 delete_count:41 deleted_byte_count:3494711 replica_placement:100 version:3 modified_at_second:1615579207
+ volume id:171 size:1081881400 collection:"collection1" file_count:24215 delete_count:36 deleted_byte_count:3191335 replica_placement:100 version:3 modified_at_second:1615596486
+ volume id:172 size:1074787304 collection:"collection1" file_count:31236 delete_count:50 deleted_byte_count:3316482 replica_placement:100 version:3 modified_at_second:1615612385
+ volume id:174 size:1073824160 collection:"collection1" file_count:30689 delete_count:36 deleted_byte_count:2160116 replica_placement:100 version:3 modified_at_second:1615598914
+ volume id:175 size:1077742472 collection:"collection1" file_count:32353 delete_count:33 deleted_byte_count:1861403 replica_placement:100 version:3 modified_at_second:1615559516
+ volume id:176 size:1073854800 collection:"collection1" file_count:30582 delete_count:34 deleted_byte_count:7701976 replica_placement:100 version:3 modified_at_second:1615626169
+ volume id:178 size:1087560112 collection:"collection1" file_count:23482 delete_count:22 deleted_byte_count:18810492 replica_placement:100 version:3 modified_at_second:1615541369
+ volume id:179 size:1074313920 collection:"collection1" file_count:21829 delete_count:24 deleted_byte_count:45574435 replica_placement:100 version:3 modified_at_second:1615580308
+ volume id:180 size:1078438448 collection:"collection1" file_count:23614 delete_count:12 deleted_byte_count:4496474 replica_placement:100 version:3 modified_at_second:1614773243
+ volume id:181 size:1074571672 collection:"collection1" file_count:22898 delete_count:19 deleted_byte_count:6628413 replica_placement:100 version:3 modified_at_second:1614745117
+ volume id:183 size:1076361616 collection:"collection1" file_count:31293 delete_count:16 deleted_byte_count:468841 replica_placement:100 version:3 modified_at_second:1615572985
+ volume id:184 size:1074594216 collection:"collection1" file_count:31368 delete_count:22 deleted_byte_count:857453 replica_placement:100 version:3 modified_at_second:1615586578
+ volume id:185 size:1074099592 collection:"collection1" file_count:30612 delete_count:17 deleted_byte_count:2610847 replica_placement:100 version:3 modified_at_second:1615506835
+ volume id:186 size:1074220664 collection:"collection1" file_count:31450 delete_count:15 deleted_byte_count:391855 replica_placement:100 version:3 modified_at_second:1615614934
+ volume id:187 size:1074396112 collection:"collection1" file_count:31853 delete_count:17 deleted_byte_count:454283 replica_placement:100 version:3 modified_at_second:1615590491
+ volume id:188 size:1074732632 collection:"collection1" file_count:31867 delete_count:19 deleted_byte_count:393743 replica_placement:100 version:3 modified_at_second:1615487645
+ volume id:189 size:1074847824 collection:"collection1" file_count:31450 delete_count:16 deleted_byte_count:1040552 replica_placement:100 version:3 modified_at_second:1615335661
+ volume id:190 size:1074008968 collection:"collection1" file_count:31987 delete_count:11 deleted_byte_count:685125 replica_placement:100 version:3 modified_at_second:1615447162
+ volume id:191 size:1075492960 collection:"collection1" file_count:31301 delete_count:19 deleted_byte_count:708401 replica_placement:100 version:3 modified_at_second:1615357457
+ volume id:192 size:1075857384 collection:"collection1" file_count:31490 delete_count:25 deleted_byte_count:720617 replica_placement:100 version:3 modified_at_second:1615621632
+ volume id:193 size:1076616760 collection:"collection1" file_count:31907 delete_count:16 deleted_byte_count:464900 replica_placement:100 version:3 modified_at_second:1615507877
+ volume id:194 size:1073985792 collection:"collection1" file_count:31434 delete_count:18 deleted_byte_count:391432 replica_placement:100 version:3 modified_at_second:1615559502
+ volume id:195 size:1074158304 collection:"collection1" file_count:31453 delete_count:15 deleted_byte_count:718266 replica_placement:100 version:3 modified_at_second:1615559331
+ volume id:196 size:1074594640 collection:"collection1" file_count:31665 delete_count:18 deleted_byte_count:3468922 replica_placement:100 version:3 modified_at_second:1615501690
+ volume id:198 size:1075104624 collection:"collection1" file_count:16577 delete_count:18 deleted_byte_count:6583181 replica_placement:100 version:3 modified_at_second:1615623371
+ volume id:199 size:1078117688 collection:"collection1" file_count:16497 delete_count:14 deleted_byte_count:1514286 replica_placement:100 version:3 modified_at_second:1615585987
+ volume id:200 size:1075630464 collection:"collection1" file_count:16380 delete_count:18 deleted_byte_count:1103109 replica_placement:100 version:3 modified_at_second:1615485252
+ volume id:201 size:1091460440 collection:"collection1" file_count:16684 delete_count:26 deleted_byte_count:5590335 replica_placement:100 version:3 modified_at_second:1615585987
+ volume id:204 size:1079766904 collection:"collection1" file_count:3233 delete_count:255 deleted_byte_count:104707641 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615565702
+ volume id:207 size:1081939960 collection:"collection1" file_count:3010 delete_count:4 deleted_byte_count:692350 replica_placement:100 version:3 modified_at_second:1615269061
+ volume id:208 size:1077863624 collection:"collection1" file_count:3147 delete_count:6 deleted_byte_count:858726 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:209 size:1074083592 collection:"collection1" file_count:3238 delete_count:4 deleted_byte_count:1494244 replica_placement:100 version:3 modified_at_second:1615419954
+ volume id:210 size:1094311304 collection:"collection1" file_count:3468 delete_count:4 deleted_byte_count:466433 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:211 size:1080610712 collection:"collection1" file_count:3247 delete_count:7 deleted_byte_count:1891456 replica_placement:100 version:3 modified_at_second:1615269124
+ volume id:216 size:1080073496 collection:"collection1" file_count:3316 delete_count:4 deleted_byte_count:179819 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:218 size:1081263944 collection:"collection1" file_count:3433 delete_count:14 deleted_byte_count:3454237 replica_placement:100 version:3 modified_at_second:1615603637
+ volume id:220 size:1081928312 collection:"collection1" file_count:3166 delete_count:13 deleted_byte_count:4127709 replica_placement:100 version:3 modified_at_second:1615579317
+ volume id:221 size:1106545536 collection:"collection1" file_count:3153 delete_count:11 deleted_byte_count:1496835 replica_placement:100 version:3 modified_at_second:1615269138
+ volume id:224 size:1093691520 collection:"collection1" file_count:3463 delete_count:10 deleted_byte_count:1128328 replica_placement:100 version:3 modified_at_second:1615601870
+ volume id:225 size:1080698928 collection:"collection1" file_count:3115 delete_count:7 deleted_byte_count:18170416 replica_placement:100 version:3 modified_at_second:1615434685
+ volume id:226 size:1103504792 collection:"collection1" file_count:2965 delete_count:10 deleted_byte_count:2639254 replica_placement:100 version:3 modified_at_second:1615601870
+ volume id:227 size:1106699864 collection:"collection1" file_count:2827 delete_count:19 deleted_byte_count:5393310 replica_placement:100 version:3 modified_at_second:1615609989
+ volume id:228 size:1109784072 collection:"collection1" file_count:2504 delete_count:24 deleted_byte_count:5458950 replica_placement:100 version:3 modified_at_second:1615610489
+ volume id:229 size:1109855256 collection:"collection1" file_count:2857 delete_count:22 deleted_byte_count:2839883 replica_placement:100 version:3 modified_at_second:1615609989
+ volume id:231 size:1112917664 collection:"collection1" file_count:3151 delete_count:19 deleted_byte_count:2852517 replica_placement:100 version:3 modified_at_second:1615611350
+ volume id:232 size:1073901520 collection:"collection1" file_count:3004 delete_count:54 deleted_byte_count:10273081 replica_placement:100 version:3 modified_at_second:1615611352
+ volume id:233 size:1080526464 collection:"collection1" file_count:3136 delete_count:61 deleted_byte_count:17991717 replica_placement:100 version:3 modified_at_second:1615611354
+ volume id:236 size:1089476200 collection:"collection1" file_count:3231 delete_count:53 deleted_byte_count:11625921 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:238 size:354320000 collection:"collection1" file_count:701 delete_count:17 deleted_byte_count:5940420 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632030
+ volume id:240 size:424791528 collection:"collection1" file_count:734 delete_count:12 deleted_byte_count:7353071 replica_placement:100 version:3 modified_at_second:1615631669
+ volume id:242 size:1075383304 collection:"collection2" file_count:10470 replica_placement:100 version:3 modified_at_second:1614852115
+ volume id:243 size:1088174560 collection:"collection2" file_count:11109 delete_count:1 deleted_byte_count:938 replica_placement:100 version:3 modified_at_second:1614852202
+ volume id:245 size:1074597056 collection:"collection2" file_count:10371 delete_count:3 deleted_byte_count:209701 replica_placement:100 version:3 modified_at_second:1614852093
+ volume id:247 size:1075859784 collection:"collection2" file_count:10443 delete_count:2 deleted_byte_count:564486 replica_placement:100 version:3 modified_at_second:1614856152
+ volume id:249 size:1074819168 collection:"collection2" file_count:10763 delete_count:2 deleted_byte_count:271699 replica_placement:100 version:3 modified_at_second:1614856231
+ volume id:250 size:1080572256 collection:"collection2" file_count:10220 replica_placement:100 version:3 modified_at_second:1614856129
+ volume id:251 size:1075684408 collection:"collection2" file_count:10847 replica_placement:100 version:3 modified_at_second:1614856270
+ volume id:254 size:1074830800 collection:"collection2" file_count:14140 delete_count:2 deleted_byte_count:105892 replica_placement:100 version:3 modified_at_second:1614861115
+ volume id:257 size:1082621664 collection:"collection2" file_count:18172 delete_count:2 deleted_byte_count:25125 replica_placement:100 version:3 modified_at_second:1614866395
+ volume id:260 size:1075105664 collection:"collection2" file_count:17316 delete_count:4 deleted_byte_count:2015310 replica_placement:100 version:3 modified_at_second:1614866226
+ volume id:261 size:1076628592 collection:"collection2" file_count:18355 delete_count:1 deleted_byte_count:1155 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:262 size:1078492464 collection:"collection2" file_count:20390 delete_count:3 deleted_byte_count:287601 replica_placement:100 version:3 modified_at_second:1614871601
+ volume id:263 size:1077167440 collection:"collection2" file_count:20227 delete_count:4 deleted_byte_count:97887 replica_placement:100 version:3 modified_at_second:1614871567
+ volume id:268 size:1074490592 collection:"collection2" file_count:21698 delete_count:1 deleted_byte_count:33968 replica_placement:100 version:3 modified_at_second:1614877435
+ volume id:269 size:1077552720 collection:"collection2" file_count:21875 delete_count:4 deleted_byte_count:347272 replica_placement:100 version:3 modified_at_second:1614877481
+ volume id:271 size:1076992648 collection:"collection2" file_count:22640 delete_count:1 deleted_byte_count:30645 replica_placement:100 version:3 modified_at_second:1614877504
+ volume id:273 size:1074873432 collection:"collection2" file_count:20511 delete_count:3 deleted_byte_count:46076 replica_placement:100 version:3 modified_at_second:1614884046
+ volume id:274 size:1075994128 collection:"collection2" file_count:20997 replica_placement:100 version:3 modified_at_second:1614884113
+ volume id:276 size:1076899888 collection:"collection2" file_count:20190 delete_count:1 deleted_byte_count:8798 replica_placement:100 version:3 modified_at_second:1614884003
+ volume id:277 size:1074956160 collection:"collection2" file_count:19260 delete_count:2 deleted_byte_count:172356 replica_placement:100 version:3 modified_at_second:1614889988
+ volume id:279 size:1077325096 collection:"collection2" file_count:19671 delete_count:6 deleted_byte_count:379116 replica_placement:100 version:3 modified_at_second:1614890230
+ volume id:282 size:1075232240 collection:"collection2" file_count:22659 delete_count:4 deleted_byte_count:67915 replica_placement:100 version:3 modified_at_second:1614897304
+ volume id:284 size:1074533384 collection:"collection2" file_count:22196 delete_count:4 deleted_byte_count:154683 replica_placement:100 version:3 modified_at_second:1614897231
+ volume id:285 size:1082128576 collection:"collection2" file_count:21804 delete_count:1 deleted_byte_count:1064 replica_placement:100 version:3 modified_at_second:1614897165
+ volume id:286 size:1077464824 collection:"collection2" file_count:23905 delete_count:6 deleted_byte_count:630577 replica_placement:100 version:3 modified_at_second:1614897401
+ volume id:287 size:1074590528 collection:"collection2" file_count:28163 delete_count:5 deleted_byte_count:35727 replica_placement:100 version:3 modified_at_second:1614904874
+ volume id:288 size:1075406800 collection:"collection2" file_count:27243 delete_count:2 deleted_byte_count:51519 replica_placement:100 version:3 modified_at_second:1614904738
+ volume id:292 size:1092010744 collection:"collection2" file_count:26781 delete_count:5 deleted_byte_count:508910 replica_placement:100 version:3 modified_at_second:1614912327
+ volume id:293 size:1075409776 collection:"collection2" file_count:26063 delete_count:4 deleted_byte_count:183834 replica_placement:100 version:3 modified_at_second:1614912235
+ volume id:294 size:1075443992 collection:"collection2" file_count:26076 delete_count:4 deleted_byte_count:194914 replica_placement:100 version:3 modified_at_second:1614912220
+ volume id:295 size:1074702376 collection:"collection2" file_count:24488 delete_count:3 deleted_byte_count:48555 replica_placement:100 version:3 modified_at_second:1614911929
+ volume id:300 size:1076212424 collection:"collection2" file_count:22892 delete_count:2 deleted_byte_count:61320 replica_placement:100 version:3 modified_at_second:1614918464
+ volume id:304 size:1081038888 collection:"collection2" file_count:24505 delete_count:2 deleted_byte_count:124447 replica_placement:100 version:3 modified_at_second:1614925567
+ volume id:305 size:1074185552 collection:"collection2" file_count:22074 delete_count:5 deleted_byte_count:20221 replica_placement:100 version:3 modified_at_second:1614925312
+ volume id:310 size:1074761520 collection:"collection2" file_count:21441 delete_count:3 deleted_byte_count:13934 replica_placement:100 version:3 modified_at_second:1614931077
+ volume id:311 size:1088248208 collection:"collection2" file_count:23553 delete_count:6 deleted_byte_count:191716 replica_placement:100 version:3 modified_at_second:1614931460
+ volume id:312 size:1075037808 collection:"collection2" file_count:22524 replica_placement:100 version:3 modified_at_second:1614937832
+ volume id:313 size:1074876016 collection:"collection2" file_count:22404 delete_count:4 deleted_byte_count:51728 replica_placement:100 version:3 modified_at_second:1614937755
+ volume id:314 size:1074670840 collection:"collection2" file_count:20964 delete_count:4 deleted_byte_count:304291 replica_placement:100 version:3 modified_at_second:1614937441
+ volume id:315 size:1084153456 collection:"collection2" file_count:23638 delete_count:2 deleted_byte_count:53956 replica_placement:100 version:3 modified_at_second:1614937884
+ volume id:316 size:1077720784 collection:"collection2" file_count:22605 delete_count:1 deleted_byte_count:8503 replica_placement:100 version:3 modified_at_second:1614937838
+ volume id:317 size:1076215040 collection:"collection2" file_count:23572 delete_count:2 deleted_byte_count:1441356 replica_placement:100 version:3 modified_at_second:1614943965
+ volume id:319 size:1073952744 collection:"collection2" file_count:22286 delete_count:2 deleted_byte_count:43421 replica_placement:100 version:3 modified_at_second:1614943810
+ volume id:320 size:1082437736 collection:"collection2" file_count:21544 delete_count:3 deleted_byte_count:16712 replica_placement:100 version:3 modified_at_second:1614943591
+ volume id:321 size:1081477960 collection:"collection2" file_count:23531 delete_count:5 deleted_byte_count:262564 replica_placement:100 version:3 modified_at_second:1614943982
+ volume id:322 size:1078471600 collection:"collection2" file_count:21905 delete_count:3 deleted_byte_count:145002 replica_placement:100 version:3 modified_at_second:1614950574
+ volume id:324 size:1075606712 collection:"collection2" file_count:20799 delete_count:1 deleted_byte_count:251210 replica_placement:100 version:3 modified_at_second:1614950310
+ volume id:326 size:1076059936 collection:"collection2" file_count:22564 delete_count:2 deleted_byte_count:192886 replica_placement:100 version:3 modified_at_second:1614950619
+ volume id:327 size:1076121224 collection:"collection2" file_count:22007 delete_count:3 deleted_byte_count:60358 replica_placement:100 version:3 modified_at_second:1614956487
+ volume id:328 size:1074767928 collection:"collection2" file_count:21720 delete_count:3 deleted_byte_count:56429 replica_placement:100 version:3 modified_at_second:1614956362
+ volume id:329 size:1076691776 collection:"collection2" file_count:22411 delete_count:5 deleted_byte_count:214092 replica_placement:100 version:3 modified_at_second:1614956485
+ volume id:331 size:1074957192 collection:"collection2" file_count:21230 delete_count:4 deleted_byte_count:62145 replica_placement:100 version:3 modified_at_second:1614956259
+ volume id:333 size:1074270192 collection:"collection2" file_count:21271 delete_count:2 deleted_byte_count:168122 replica_placement:100 version:3 modified_at_second:1614962697
+ volume id:335 size:1076235176 collection:"collection2" file_count:22391 delete_count:3 deleted_byte_count:8838 replica_placement:100 version:3 modified_at_second:1614962970
+ volume id:336 size:1087853032 collection:"collection2" file_count:22801 delete_count:2 deleted_byte_count:26394 replica_placement:100 version:3 modified_at_second:1614963003
+ volume id:338 size:1076118360 collection:"collection2" file_count:21680 replica_placement:100 version:3 modified_at_second:1614969850
+ volume id:342 size:1080186296 collection:"collection2" file_count:22405 delete_count:4 deleted_byte_count:64819 replica_placement:100 version:3 modified_at_second:1614977518
+ volume id:343 size:1075345184 collection:"collection2" file_count:21095 delete_count:2 deleted_byte_count:20581 replica_placement:100 version:3 modified_at_second:1614977148
+ volume id:349 size:1075957824 collection:"collection2" file_count:22395 delete_count:2 deleted_byte_count:61565 replica_placement:100 version:3 modified_at_second:1614984748
+ volume id:350 size:1074756688 collection:"collection2" file_count:21990 delete_count:3 deleted_byte_count:233881 replica_placement:100 version:3 modified_at_second:1614984682
+ volume id:354 size:1085213992 collection:"collection2" file_count:23150 delete_count:4 deleted_byte_count:82391 replica_placement:100 version:3 modified_at_second:1614992207
+ volume id:356 size:1083304992 collection:"collection2" file_count:21552 delete_count:4 deleted_byte_count:14472 replica_placement:100 version:3 modified_at_second:1614992027
+ volume id:358 size:1085152312 collection:"collection2" file_count:23756 delete_count:3 deleted_byte_count:44531 replica_placement:100 version:3 modified_at_second:1614998824
+ volume id:359 size:1074211240 collection:"collection2" file_count:22437 delete_count:2 deleted_byte_count:187953 replica_placement:100 version:3 modified_at_second:1614998711
+ volume id:362 size:1074074120 collection:"collection2" file_count:20595 delete_count:1 deleted_byte_count:112145 replica_placement:100 version:3 modified_at_second:1615004407
+ volume id:363 size:1078859496 collection:"collection2" file_count:23177 delete_count:4 deleted_byte_count:9601 replica_placement:100 version:3 modified_at_second:1615004822
+ volume id:364 size:1081280816 collection:"collection2" file_count:22686 delete_count:1 deleted_byte_count:84375 replica_placement:100 version:3 modified_at_second:1615004813
+ volume id:365 size:1075736632 collection:"collection2" file_count:22193 delete_count:5 deleted_byte_count:259033 replica_placement:100 version:3 modified_at_second:1615004776
+ volume id:366 size:1075267272 collection:"collection2" file_count:21856 delete_count:5 deleted_byte_count:138363 replica_placement:100 version:3 modified_at_second:1615004703
+ volume id:367 size:1076403648 collection:"collection2" file_count:22995 delete_count:2 deleted_byte_count:36955 replica_placement:100 version:3 modified_at_second:1615010985
+ volume id:368 size:1074822016 collection:"collection2" file_count:22252 delete_count:4 deleted_byte_count:3291946 replica_placement:100 version:3 modified_at_second:1615010878
+ volume id:369 size:1091472040 collection:"collection2" file_count:23709 delete_count:4 deleted_byte_count:400876 replica_placement:100 version:3 modified_at_second:1615011019
+ volume id:370 size:1076040480 collection:"collection2" file_count:22092 delete_count:2 deleted_byte_count:115388 replica_placement:100 version:3 modified_at_second:1615010877
+ volume id:371 size:1078806160 collection:"collection2" file_count:22685 delete_count:2 deleted_byte_count:68905 replica_placement:100 version:3 modified_at_second:1615010994
+ volume id:372 size:1076193312 collection:"collection2" file_count:22774 delete_count:1 deleted_byte_count:3495 replica_placement:100 version:3 modified_at_second:1615016911
+ volume id:374 size:1085011080 collection:"collection2" file_count:23054 delete_count:2 deleted_byte_count:89034 replica_placement:100 version:3 modified_at_second:1615016917
+ volume id:375 size:1076140688 collection:"collection2" file_count:21880 delete_count:2 deleted_byte_count:51103 replica_placement:100 version:3 modified_at_second:1615016787
+ volume id:378 size:959273824 collection:"collection2" file_count:15031 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:379 size:1014108592 collection:"collection2" file_count:15360 delete_count:8 deleted_byte_count:2524591 replica_placement:100 version:3 modified_at_second:1615632323
+ volume id:380 size:1010760464 collection:"collection2" file_count:14920 delete_count:7 deleted_byte_count:134859 replica_placement:100 version:3 modified_at_second:1615632322
+ Disk hdd total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ DataNode 192.168.1.1:8080 total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ Rack DefaultRack total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ DataCenter dc4 total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457
+ DataCenter dc5 hdd(volume:299/3000 active:299 free:2701 remote:0)
+ Rack DefaultRack hdd(volume:299/3000 active:299 free:2701 remote:0)
+ DataNode 192.168.1.5:8080 hdd(volume:299/3000 active:299 free:2701 remote:0)
+ Disk hdd(volume:299/3000 active:299 free:2701 remote:0)
+ volume id:5 size:293806008 file_count:1669 delete_count:2 deleted_byte_count:274334 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631342
+ volume id:11 size:1109224552 collection:"collection0" file_count:44 replica_placement:100 version:3 modified_at_second:1615629606
+ volume id:18 size:1096678688 collection:"collection0" file_count:88 delete_count:4 deleted_byte_count:8633 replica_placement:100 version:3 modified_at_second:1615631673
+ volume id:19 size:1096923792 collection:"collection0" file_count:100 delete_count:10 deleted_byte_count:75779917 replica_placement:100 version:3 compact_revision:4 modified_at_second:1615630117
+ volume id:20 size:1074760432 collection:"collection0" file_count:82 delete_count:5 deleted_byte_count:12156 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615629340
+ volume id:26 size:532657632 collection:"collection0" file_count:100 delete_count:4 deleted_byte_count:9081 replica_placement:100 version:3 modified_at_second:1614170024
+ volume id:27 size:298886792 file_count:1608 replica_placement:100 version:3 modified_at_second:1615632482
+ volume id:28 size:308919192 file_count:1591 delete_count:1 deleted_byte_count:125280 replica_placement:100 version:3 modified_at_second:1615631762
+ volume id:29 size:281582688 file_count:1537 replica_placement:100 version:3 modified_at_second:1615629422
+ volume id:30 size:289466144 file_count:1566 delete_count:1 deleted_byte_count:124972 replica_placement:100 version:3 modified_at_second:1615632422
+ volume id:31 size:273363256 file_count:1498 replica_placement:100 version:3 modified_at_second:1615631642
+ volume id:32 size:281343360 file_count:1497 replica_placement:100 version:3 modified_at_second:1615632362
+ volume id:33 size:1130226344 collection:"collection1" file_count:7322 delete_count:172 deleted_byte_count:45199399 replica_placement:100 version:3 modified_at_second:1615618789
+ volume id:34 size:1077111136 collection:"collection1" file_count:9781 delete_count:110 deleted_byte_count:20894827 replica_placement:100 version:3 modified_at_second:1615619366
+ volume id:35 size:1075241744 collection:"collection1" file_count:10523 delete_count:97 deleted_byte_count:46586217 replica_placement:100 version:3 modified_at_second:1615618790
+ volume id:36 size:1075118336 collection:"collection1" file_count:10341 delete_count:118 deleted_byte_count:24753278 replica_placement:100 version:3 modified_at_second:1615606148
+ volume id:37 size:1075895576 collection:"collection1" file_count:12013 delete_count:98 deleted_byte_count:50747932 replica_placement:100 version:3 modified_at_second:1615594776
+ volume id:38 size:1075545744 collection:"collection1" file_count:13324 delete_count:100 deleted_byte_count:25223906 replica_placement:100 version:3 modified_at_second:1615569830
+ volume id:39 size:1076606536 collection:"collection1" file_count:12612 delete_count:78 deleted_byte_count:17462730 replica_placement:100 version:3 modified_at_second:1615611959
+ volume id:40 size:1075358552 collection:"collection1" file_count:12597 delete_count:62 deleted_byte_count:11657901 replica_placement:100 version:3 modified_at_second:1615612994
+ volume id:41 size:1076283592 collection:"collection1" file_count:12088 delete_count:84 deleted_byte_count:19311237 replica_placement:100 version:3 modified_at_second:1615596736
+ volume id:42 size:1093948352 collection:"collection1" file_count:7889 delete_count:47 deleted_byte_count:5697275 replica_placement:100 version:3 modified_at_second:1615548906
+ volume id:43 size:1116445864 collection:"collection1" file_count:7355 delete_count:57 deleted_byte_count:9727158 replica_placement:100 version:3 modified_at_second:1615566167
+ volume id:44 size:1077582560 collection:"collection1" file_count:7295 delete_count:50 deleted_byte_count:12618414 replica_placement:100 version:3 modified_at_second:1615566170
+ volume id:45 size:1075254640 collection:"collection1" file_count:10772 delete_count:76 deleted_byte_count:22426345 replica_placement:100 version:3 modified_at_second:1615573498
+ volume id:46 size:1075286056 collection:"collection1" file_count:9947 delete_count:309 deleted_byte_count:105601163 replica_placement:100 version:3 modified_at_second:1615569825
+ volume id:47 size:444599784 collection:"collection1" file_count:709 delete_count:19 deleted_byte_count:11913451 replica_placement:100 version:3 modified_at_second:1615632397
+ volume id:48 size:1076778664 collection:"collection1" file_count:9850 delete_count:77 deleted_byte_count:16641907 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630690
+ volume id:49 size:1078775288 collection:"collection1" file_count:9631 delete_count:27 deleted_byte_count:5985628 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615575823
+ volume id:50 size:1076688288 collection:"collection1" file_count:7921 delete_count:26 deleted_byte_count:5162032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615610876
+ volume id:51 size:1076796120 collection:"collection1" file_count:10550 delete_count:39 deleted_byte_count:12723654 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615547786
+ volume id:53 size:1063089216 collection:"collection1" file_count:9832 delete_count:31 deleted_byte_count:9273066 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156
+ volume id:54 size:1045022288 collection:"collection1" file_count:9409 delete_count:29 deleted_byte_count:15102818 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630813
+ volume id:55 size:1012890016 collection:"collection1" file_count:8651 delete_count:27 deleted_byte_count:9418841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631453
+ volume id:56 size:1002412240 collection:"collection1" file_count:8762 delete_count:40 deleted_byte_count:65885831 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632014
+ volume id:57 size:839849792 collection:"collection1" file_count:7514 delete_count:24 deleted_byte_count:6228543 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631775
+ volume id:58 size:908064192 collection:"collection1" file_count:8128 delete_count:21 deleted_byte_count:6113731 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632343
+ volume id:59 size:988302272 collection:"collection1" file_count:8098 delete_count:20 deleted_byte_count:3947615 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632238
+ volume id:60 size:1010702480 collection:"collection1" file_count:8969 delete_count:79 deleted_byte_count:24782814 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632439
+ volume id:61 size:975604544 collection:"collection1" file_count:8683 delete_count:20 deleted_byte_count:10276072 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631176
+ volume id:62 size:873845904 collection:"collection1" file_count:7897 delete_count:23 deleted_byte_count:10920170 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631133
+ volume id:63 size:956941176 collection:"collection1" file_count:8271 delete_count:32 deleted_byte_count:15876189 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632036
+ volume id:64 size:965638424 collection:"collection1" file_count:8218 delete_count:27 deleted_byte_count:6922489 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631032
+ volume id:65 size:823283608 collection:"collection1" file_count:7834 delete_count:29 deleted_byte_count:5950610 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307
+ volume id:66 size:821343440 collection:"collection1" file_count:7383 delete_count:29 deleted_byte_count:12010343 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631968
+ volume id:67 size:878713880 collection:"collection1" file_count:7299 delete_count:117 deleted_byte_count:24857326 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156
+ volume id:69 size:863913896 collection:"collection1" file_count:7291 delete_count:100 deleted_byte_count:25335024 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630534
+ volume id:70 size:886695472 collection:"collection1" file_count:7769 delete_count:164 deleted_byte_count:45162513 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632398
+ volume id:71 size:907608392 collection:"collection1" file_count:7658 delete_count:122 deleted_byte_count:27622941 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307
+ volume id:72 size:903990720 collection:"collection1" file_count:6996 delete_count:240 deleted_byte_count:74147727 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630985
+ volume id:73 size:929047544 collection:"collection1" file_count:7038 delete_count:227 deleted_byte_count:65336664 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630707
+ volume id:75 size:908045000 collection:"collection1" file_count:6911 delete_count:268 deleted_byte_count:73934373 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632430
+ volume id:76 size:985296744 collection:"collection1" file_count:6566 delete_count:61 deleted_byte_count:44464430 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284
+ volume id:77 size:929398296 collection:"collection1" file_count:7427 delete_count:238 deleted_byte_count:59581579 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632014
+ volume id:78 size:1075671512 collection:"collection1" file_count:7540 delete_count:258 deleted_byte_count:71726846 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615582829
+ volume id:79 size:948225472 collection:"collection1" file_count:6997 delete_count:227 deleted_byte_count:60625763 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631326
+ volume id:80 size:827912928 collection:"collection1" file_count:6916 delete_count:15 deleted_byte_count:5611604 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631159
+ volume id:81 size:880693168 collection:"collection1" file_count:7481 delete_count:238 deleted_byte_count:80880878 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631395
+ volume id:82 size:1041660512 collection:"collection1" file_count:7043 delete_count:207 deleted_byte_count:52275724 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632430
+ volume id:83 size:936194288 collection:"collection1" file_count:7593 delete_count:13 deleted_byte_count:4633917 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632029
+ volume id:84 size:871262320 collection:"collection1" file_count:8190 delete_count:14 deleted_byte_count:3150948 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631161
+ volume id:86 size:1009434632 collection:"collection1" file_count:8474 delete_count:236 deleted_byte_count:64543674 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812
+ volume id:87 size:922274624 collection:"collection1" file_count:12902 delete_count:13 deleted_byte_count:3412959 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632438
+ volume id:88 size:1073767976 collection:"collection1" file_count:14994 delete_count:207 deleted_byte_count:82380696 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615541383
+ volume id:89 size:1044421824 collection:"collection1" file_count:14943 delete_count:243 deleted_byte_count:58543159 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632208
+ volume id:90 size:891163760 collection:"collection1" file_count:14608 delete_count:10 deleted_byte_count:2564369 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615629392
+ volume id:91 size:936573952 collection:"collection1" file_count:14686 delete_count:11 deleted_byte_count:4717727 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631851
+ volume id:92 size:992439144 collection:"collection1" file_count:7061 delete_count:195 deleted_byte_count:60649573 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630566
+ volume id:93 size:1079602592 collection:"collection1" file_count:7878 delete_count:270 deleted_byte_count:74150048 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615556013
+ volume id:94 size:1030684704 collection:"collection1" file_count:7660 delete_count:207 deleted_byte_count:70150733 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631616
+ volume id:95 size:990877824 collection:"collection1" file_count:6620 delete_count:206 deleted_byte_count:60363604 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631867
+ volume id:96 size:989294848 collection:"collection1" file_count:7544 delete_count:229 deleted_byte_count:59931853 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630778
+ volume id:98 size:1077836472 collection:"collection1" file_count:7605 delete_count:202 deleted_byte_count:73180379 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615523691
+ volume id:99 size:1071718496 collection:"collection1" file_count:7470 delete_count:8 deleted_byte_count:9624950 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631175
+ volume id:100 size:1083617472 collection:"collection1" file_count:7018 delete_count:187 deleted_byte_count:61304236 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615505914
+ volume id:101 size:1077109408 collection:"collection1" file_count:7706 delete_count:226 deleted_byte_count:77864780 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630994
+ volume id:102 size:1074359920 collection:"collection1" file_count:7338 delete_count:7 deleted_byte_count:6499151 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615626682
+ volume id:103 size:1075863904 collection:"collection1" file_count:7184 delete_count:186 deleted_byte_count:58872238 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615628417
+ volume id:106 size:1075458680 collection:"collection1" file_count:7182 delete_count:307 deleted_byte_count:69349053 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615598137
+ volume id:107 size:1073811776 collection:"collection1" file_count:7436 delete_count:168 deleted_byte_count:57747428 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615293569
+ volume id:108 size:1074648024 collection:"collection1" file_count:7472 delete_count:194 deleted_byte_count:70864699 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593231
+ volume id:109 size:1075254560 collection:"collection1" file_count:7556 delete_count:263 deleted_byte_count:55155265 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615502487
+ volume id:110 size:1076575744 collection:"collection1" file_count:6996 delete_count:163 deleted_byte_count:52954032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615590786
+ volume id:111 size:1073826176 collection:"collection1" file_count:7355 delete_count:155 deleted_byte_count:50083578 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593232
+ volume id:112 size:1076392512 collection:"collection1" file_count:8291 delete_count:156 deleted_byte_count:74120183 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569823
+ volume id:113 size:1076709184 collection:"collection1" file_count:9355 delete_count:177 deleted_byte_count:59796765 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569822
+ volume id:114 size:1074762792 collection:"collection1" file_count:8802 delete_count:156 deleted_byte_count:38470055 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615591826
+ volume id:115 size:1076192296 collection:"collection1" file_count:7690 delete_count:154 deleted_byte_count:32267193 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615285296
+ volume id:117 size:1073917192 collection:"collection1" file_count:9520 delete_count:114 deleted_byte_count:21835126 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573712
+ volume id:118 size:1074064344 collection:"collection1" file_count:8738 delete_count:15 deleted_byte_count:3460697 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615516264
+ volume id:120 size:1076115928 collection:"collection1" file_count:9639 delete_count:118 deleted_byte_count:33357871 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615482567
+ volume id:121 size:1078803320 collection:"collection1" file_count:10113 delete_count:441 deleted_byte_count:94128627 replica_placement:100 version:3 modified_at_second:1615506626
+ volume id:122 size:1076235312 collection:"collection1" file_count:9106 delete_count:252 deleted_byte_count:93041272 replica_placement:100 version:3 modified_at_second:1615585912
+ volume id:123 size:1080491112 collection:"collection1" file_count:10623 delete_count:302 deleted_byte_count:83956998 replica_placement:100 version:3 modified_at_second:1615585916
+ volume id:124 size:1074519360 collection:"collection1" file_count:9457 delete_count:286 deleted_byte_count:74752459 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:125 size:1088687040 collection:"collection1" file_count:9518 delete_count:281 deleted_byte_count:76037905 replica_placement:100 version:3 modified_at_second:1615585913
+ volume id:126 size:1073867408 collection:"collection1" file_count:9320 delete_count:278 deleted_byte_count:94547424 replica_placement:100 version:3 modified_at_second:1615585911
+ volume id:127 size:1074907336 collection:"collection1" file_count:9900 delete_count:133 deleted_byte_count:48570820 replica_placement:100 version:3 modified_at_second:1615612990
+ volume id:128 size:1074874632 collection:"collection1" file_count:9821 delete_count:148 deleted_byte_count:43633334 replica_placement:100 version:3 modified_at_second:1615602670
+ volume id:129 size:1074704328 collection:"collection1" file_count:10012 delete_count:150 deleted_byte_count:64491721 replica_placement:100 version:3 modified_at_second:1615627566
+ volume id:130 size:1075000632 collection:"collection1" file_count:10633 delete_count:161 deleted_byte_count:34768201 replica_placement:100 version:3 modified_at_second:1615582327
+ volume id:131 size:1075279584 collection:"collection1" file_count:10075 delete_count:135 deleted_byte_count:29795712 replica_placement:100 version:3 modified_at_second:1615523898
+ volume id:132 size:1088539552 collection:"collection1" file_count:11051 delete_count:71 deleted_byte_count:17178322 replica_placement:100 version:3 modified_at_second:1615619581
+ volume id:134 size:1074367304 collection:"collection1" file_count:10662 delete_count:69 deleted_byte_count:25530139 replica_placement:100 version:3 modified_at_second:1615555873
+ volume id:135 size:1073906776 collection:"collection1" file_count:10446 delete_count:71 deleted_byte_count:28599975 replica_placement:100 version:3 modified_at_second:1615569816
+ volume id:136 size:1074433552 collection:"collection1" file_count:9593 delete_count:72 deleted_byte_count:26912512 replica_placement:100 version:3 modified_at_second:1615376036
+ volume id:137 size:1074309264 collection:"collection1" file_count:9633 delete_count:50 deleted_byte_count:27487972 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:138 size:1074465744 collection:"collection1" file_count:10120 delete_count:55 deleted_byte_count:15875438 replica_placement:100 version:3 modified_at_second:1615572231
+ volume id:140 size:1076203744 collection:"collection1" file_count:11219 delete_count:57 deleted_byte_count:19864498 replica_placement:100 version:3 modified_at_second:1615571947
+ volume id:141 size:1074619488 collection:"collection1" file_count:9840 delete_count:45 deleted_byte_count:40890181 replica_placement:100 version:3 modified_at_second:1615630994
+ volume id:142 size:1075733064 collection:"collection1" file_count:9009 delete_count:48 deleted_byte_count:9912854 replica_placement:100 version:3 modified_at_second:1615598913
+ volume id:143 size:1075011280 collection:"collection1" file_count:9608 delete_count:51 deleted_byte_count:37282460 replica_placement:100 version:3 modified_at_second:1615488584
+ volume id:144 size:1074549720 collection:"collection1" file_count:8780 delete_count:50 deleted_byte_count:52475146 replica_placement:100 version:3 modified_at_second:1615573451
+ volume id:145 size:1074394928 collection:"collection1" file_count:9255 delete_count:34 deleted_byte_count:38011392 replica_placement:100 version:3 modified_at_second:1615591825
+ volume id:146 size:1076337576 collection:"collection1" file_count:10492 delete_count:50 deleted_byte_count:17071505 replica_placement:100 version:3 modified_at_second:1615632005
+ volume id:147 size:1077130576 collection:"collection1" file_count:10451 delete_count:27 deleted_byte_count:8290907 replica_placement:100 version:3 modified_at_second:1615604115
+ volume id:148 size:1076066568 collection:"collection1" file_count:9547 delete_count:33 deleted_byte_count:7034089 replica_placement:100 version:3 modified_at_second:1615586390
+ volume id:149 size:1074989016 collection:"collection1" file_count:8352 delete_count:35 deleted_byte_count:7179742 replica_placement:100 version:3 modified_at_second:1615494494
+ volume id:150 size:1076290328 collection:"collection1" file_count:9328 delete_count:33 deleted_byte_count:43417791 replica_placement:100 version:3 modified_at_second:1615611567
+ volume id:152 size:1075941400 collection:"collection1" file_count:9951 delete_count:36 deleted_byte_count:25348335 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:153 size:1078539784 collection:"collection1" file_count:10924 delete_count:34 deleted_byte_count:12603081 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:154 size:1081244696 collection:"collection1" file_count:11002 delete_count:31 deleted_byte_count:8467560 replica_placement:100 version:3 modified_at_second:1615478469
+ volume id:155 size:1075140688 collection:"collection1" file_count:10882 delete_count:32 deleted_byte_count:10076804 replica_placement:100 version:3 modified_at_second:1615606614
+ volume id:156 size:1074975832 collection:"collection1" file_count:9535 delete_count:40 deleted_byte_count:11426621 replica_placement:100 version:3 modified_at_second:1615628341
+ volume id:157 size:1076758536 collection:"collection1" file_count:10012 delete_count:19 deleted_byte_count:11688737 replica_placement:100 version:3 modified_at_second:1615597782
+ volume id:158 size:1087251976 collection:"collection1" file_count:9972 delete_count:20 deleted_byte_count:10328429 replica_placement:100 version:3 modified_at_second:1615588879
+ volume id:159 size:1074132368 collection:"collection1" file_count:9382 delete_count:27 deleted_byte_count:11474574 replica_placement:100 version:3 modified_at_second:1615593593
+ volume id:160 size:1075680952 collection:"collection1" file_count:9772 delete_count:22 deleted_byte_count:4981968 replica_placement:100 version:3 modified_at_second:1615597780
+ volume id:162 size:1074286880 collection:"collection1" file_count:11220 delete_count:17 deleted_byte_count:1815547 replica_placement:100 version:3 modified_at_second:1615478126
+ volume id:163 size:1074457192 collection:"collection1" file_count:12524 delete_count:27 deleted_byte_count:6359619 replica_placement:100 version:3 modified_at_second:1615579313
+ volume id:164 size:1074261248 collection:"collection1" file_count:11922 delete_count:25 deleted_byte_count:2923288 replica_placement:100 version:3 modified_at_second:1615620084
+ volume id:165 size:1073891016 collection:"collection1" file_count:9152 delete_count:12 deleted_byte_count:19164659 replica_placement:100 version:3 modified_at_second:1615471907
+ volume id:166 size:1075637536 collection:"collection1" file_count:14211 delete_count:24 deleted_byte_count:20415490 replica_placement:100 version:3 modified_at_second:1615491019
+ volume id:168 size:1074718808 collection:"collection1" file_count:25702 delete_count:40 deleted_byte_count:4024775 replica_placement:100 version:3 modified_at_second:1615585664
+ volume id:169 size:1073863128 collection:"collection1" file_count:25248 delete_count:43 deleted_byte_count:3013817 replica_placement:100 version:3 modified_at_second:1615569832
+ volume id:170 size:1075747096 collection:"collection1" file_count:24596 delete_count:41 deleted_byte_count:3494711 replica_placement:100 version:3 modified_at_second:1615579204
+ volume id:171 size:1081881312 collection:"collection1" file_count:24215 delete_count:36 deleted_byte_count:3191335 replica_placement:100 version:3 modified_at_second:1615596485
+ volume id:172 size:1074787312 collection:"collection1" file_count:31236 delete_count:50 deleted_byte_count:3316482 replica_placement:100 version:3 modified_at_second:1615612385
+ volume id:173 size:1074154648 collection:"collection1" file_count:30884 delete_count:34 deleted_byte_count:2430948 replica_placement:100 version:3 modified_at_second:1615591904
+ volume id:175 size:1077742504 collection:"collection1" file_count:32353 delete_count:33 deleted_byte_count:1861403 replica_placement:100 version:3 modified_at_second:1615559515
+ volume id:176 size:1073854800 collection:"collection1" file_count:30582 delete_count:34 deleted_byte_count:7701976 replica_placement:100 version:3 modified_at_second:1615626169
+ volume id:177 size:1074120120 collection:"collection1" file_count:22293 delete_count:16 deleted_byte_count:3719562 replica_placement:100 version:3 modified_at_second:1615516891
+ volume id:178 size:1087560112 collection:"collection1" file_count:23482 delete_count:22 deleted_byte_count:18810492 replica_placement:100 version:3 modified_at_second:1615541369
+ volume id:180 size:1078438536 collection:"collection1" file_count:23614 delete_count:12 deleted_byte_count:4496474 replica_placement:100 version:3 modified_at_second:1614773242
+ volume id:181 size:1074571768 collection:"collection1" file_count:22898 delete_count:19 deleted_byte_count:6628413 replica_placement:100 version:3 modified_at_second:1614745116
+ volume id:182 size:1076131280 collection:"collection1" file_count:31987 delete_count:21 deleted_byte_count:1416142 replica_placement:100 version:3 modified_at_second:1615568922
+ volume id:183 size:1076361448 collection:"collection1" file_count:31293 delete_count:16 deleted_byte_count:468841 replica_placement:100 version:3 modified_at_second:1615572982
+ volume id:184 size:1074594160 collection:"collection1" file_count:31368 delete_count:22 deleted_byte_count:857453 replica_placement:100 version:3 modified_at_second:1615586578
+ volume id:185 size:1074099624 collection:"collection1" file_count:30612 delete_count:17 deleted_byte_count:2610847 replica_placement:100 version:3 modified_at_second:1615506832
+ volume id:186 size:1074220864 collection:"collection1" file_count:31450 delete_count:15 deleted_byte_count:391855 replica_placement:100 version:3 modified_at_second:1615614933
+ volume id:187 size:1074395944 collection:"collection1" file_count:31853 delete_count:17 deleted_byte_count:454283 replica_placement:100 version:3 modified_at_second:1615590490
+ volume id:188 size:1074732792 collection:"collection1" file_count:31867 delete_count:19 deleted_byte_count:393743 replica_placement:100 version:3 modified_at_second:1615487645
+ volume id:189 size:1074847896 collection:"collection1" file_count:31450 delete_count:16 deleted_byte_count:1040552 replica_placement:100 version:3 modified_at_second:1615335661
+ volume id:190 size:1074008912 collection:"collection1" file_count:31987 delete_count:11 deleted_byte_count:685125 replica_placement:100 version:3 modified_at_second:1615447161
+ volume id:191 size:1075493024 collection:"collection1" file_count:31301 delete_count:19 deleted_byte_count:708401 replica_placement:100 version:3 modified_at_second:1615357456
+ volume id:192 size:1075857400 collection:"collection1" file_count:31490 delete_count:25 deleted_byte_count:720617 replica_placement:100 version:3 modified_at_second:1615621632
+ volume id:193 size:1076616768 collection:"collection1" file_count:31907 delete_count:16 deleted_byte_count:464900 replica_placement:100 version:3 modified_at_second:1615507875
+ volume id:194 size:1073985624 collection:"collection1" file_count:31434 delete_count:18 deleted_byte_count:391432 replica_placement:100 version:3 modified_at_second:1615559499
+ volume id:195 size:1074158312 collection:"collection1" file_count:31453 delete_count:15 deleted_byte_count:718266 replica_placement:100 version:3 modified_at_second:1615559331
+ volume id:196 size:1074594784 collection:"collection1" file_count:31665 delete_count:18 deleted_byte_count:3468922 replica_placement:100 version:3 modified_at_second:1615501688
+ volume id:197 size:1075423296 collection:"collection1" file_count:16473 delete_count:15 deleted_byte_count:12552442 replica_placement:100 version:3 modified_at_second:1615485253
+ volume id:198 size:1075104712 collection:"collection1" file_count:16577 delete_count:18 deleted_byte_count:6583181 replica_placement:100 version:3 modified_at_second:1615623369
+ volume id:199 size:1078117688 collection:"collection1" file_count:16497 delete_count:14 deleted_byte_count:1514286 replica_placement:100 version:3 modified_at_second:1615585984
+ volume id:200 size:1075630536 collection:"collection1" file_count:16380 delete_count:18 deleted_byte_count:1103109 replica_placement:100 version:3 modified_at_second:1615485252
+ volume id:201 size:1091460440 collection:"collection1" file_count:16684 delete_count:26 deleted_byte_count:5590335 replica_placement:100 version:3 modified_at_second:1615585987
+ volume id:202 size:1077533160 collection:"collection1" file_count:2847 delete_count:67 deleted_byte_count:65172985 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615588497
+ volume id:203 size:1027316272 collection:"collection1" file_count:3040 delete_count:11 deleted_byte_count:3993230 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631728
+ volume id:204 size:1079766872 collection:"collection1" file_count:3233 delete_count:255 deleted_byte_count:104707641 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615565701
+ volume id:205 size:1078485304 collection:"collection1" file_count:2869 delete_count:43 deleted_byte_count:18290259 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615579314
+ volume id:206 size:1082045848 collection:"collection1" file_count:2979 delete_count:225 deleted_byte_count:88220074 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630989
+ volume id:207 size:1081939960 collection:"collection1" file_count:3010 delete_count:4 deleted_byte_count:692350 replica_placement:100 version:3 modified_at_second:1615269061
+ volume id:208 size:1077863624 collection:"collection1" file_count:3147 delete_count:6 deleted_byte_count:858726 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:210 size:1094311304 collection:"collection1" file_count:3468 delete_count:4 deleted_byte_count:466433 replica_placement:100 version:3 modified_at_second:1615495515
+ volume id:212 size:1078293448 collection:"collection1" file_count:3106 delete_count:6 deleted_byte_count:2085755 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:213 size:1093588072 collection:"collection1" file_count:3681 delete_count:12 deleted_byte_count:3138791 replica_placement:100 version:3 modified_at_second:1615586387
+ volume id:214 size:1074486992 collection:"collection1" file_count:3217 delete_count:10 deleted_byte_count:6392871 replica_placement:100 version:3 modified_at_second:1615586383
+ volume id:215 size:1074798704 collection:"collection1" file_count:2819 delete_count:31 deleted_byte_count:10873569 replica_placement:100 version:3 modified_at_second:1615586386
+ volume id:217 size:1075381872 collection:"collection1" file_count:3331 delete_count:14 deleted_byte_count:2009141 replica_placement:100 version:3 modified_at_second:1615401638
+ volume id:218 size:1081263944 collection:"collection1" file_count:3433 delete_count:14 deleted_byte_count:3454237 replica_placement:100 version:3 modified_at_second:1615603637
+ volume id:219 size:1092298816 collection:"collection1" file_count:3193 delete_count:17 deleted_byte_count:2047576 replica_placement:100 version:3 modified_at_second:1615579316
+ volume id:220 size:1081928312 collection:"collection1" file_count:3166 delete_count:13 deleted_byte_count:4127709 replica_placement:100 version:3 modified_at_second:1615579317
+ volume id:221 size:1106545456 collection:"collection1" file_count:3153 delete_count:11 deleted_byte_count:1496835 replica_placement:100 version:3 modified_at_second:1615269138
+ volume id:222 size:1106623104 collection:"collection1" file_count:3273 delete_count:11 deleted_byte_count:2114627 replica_placement:100 version:3 modified_at_second:1615586243
+ volume id:223 size:1075233064 collection:"collection1" file_count:2966 delete_count:9 deleted_byte_count:744001 replica_placement:100 version:3 modified_at_second:1615586244
+ volume id:224 size:1093691520 collection:"collection1" file_count:3463 delete_count:10 deleted_byte_count:1128328 replica_placement:100 version:3 modified_at_second:1615601870
+ volume id:225 size:1080698928 collection:"collection1" file_count:3115 delete_count:7 deleted_byte_count:18170416 replica_placement:100 version:3 modified_at_second:1615434684
+ volume id:226 size:1103504768 collection:"collection1" file_count:2965 delete_count:10 deleted_byte_count:2639254 replica_placement:100 version:3 modified_at_second:1615601867
+ volume id:228 size:1109784072 collection:"collection1" file_count:2504 delete_count:24 deleted_byte_count:5458950 replica_placement:100 version:3 modified_at_second:1615610489
+ volume id:230 size:1080722984 collection:"collection1" file_count:2898 delete_count:15 deleted_byte_count:3929261 replica_placement:100 version:3 modified_at_second:1615610537
+ volume id:232 size:1073901520 collection:"collection1" file_count:3004 delete_count:54 deleted_byte_count:10273081 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:234 size:1073835280 collection:"collection1" file_count:2965 delete_count:41 deleted_byte_count:4960354 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:235 size:1075586104 collection:"collection1" file_count:2767 delete_count:33 deleted_byte_count:3216540 replica_placement:100 version:3 modified_at_second:1615611354
+ volume id:236 size:1089476136 collection:"collection1" file_count:3231 delete_count:53 deleted_byte_count:11625921 replica_placement:100 version:3 modified_at_second:1615611351
+ volume id:237 size:375722792 collection:"collection1" file_count:736 delete_count:16 deleted_byte_count:4464870 replica_placement:100 version:3 modified_at_second:1615631727
+ volume id:238 size:354320000 collection:"collection1" file_count:701 delete_count:17 deleted_byte_count:5940420 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632030
+ volume id:239 size:426569024 collection:"collection1" file_count:693 delete_count:19 deleted_byte_count:13020783 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630841
+ volume id:240 size:424791528 collection:"collection1" file_count:733 delete_count:13 deleted_byte_count:7515220 replica_placement:100 version:3 modified_at_second:1615631670
+ volume id:241 size:380217424 collection:"collection1" file_count:633 delete_count:6 deleted_byte_count:1715768 replica_placement:100 version:3 modified_at_second:1615632006
+ volume id:242 size:1075383392 collection:"collection2" file_count:10470 replica_placement:100 version:3 modified_at_second:1614852116
+ volume id:243 size:1088174704 collection:"collection2" file_count:11109 delete_count:1 deleted_byte_count:938 replica_placement:100 version:3 modified_at_second:1614852203
+ volume id:244 size:1080295352 collection:"collection2" file_count:10812 delete_count:1 deleted_byte_count:795 replica_placement:100 version:3 modified_at_second:1615628825
+ volume id:246 size:1075998672 collection:"collection2" file_count:10365 delete_count:1 deleted_byte_count:13112 replica_placement:100 version:3 modified_at_second:1614852106
+ volume id:247 size:1075859808 collection:"collection2" file_count:10443 delete_count:2 deleted_byte_count:564486 replica_placement:100 version:3 modified_at_second:1614856152
+ volume id:248 size:1084301208 collection:"collection2" file_count:11217 delete_count:4 deleted_byte_count:746488 replica_placement:100 version:3 modified_at_second:1614856285
+ volume id:250 size:1080572168 collection:"collection2" file_count:10220 replica_placement:100 version:3 modified_at_second:1614856129
+ volume id:252 size:1075065264 collection:"collection2" file_count:14622 delete_count:2 deleted_byte_count:5228 replica_placement:100 version:3 modified_at_second:1614861200
+ volume id:253 size:1087328880 collection:"collection2" file_count:14920 delete_count:3 deleted_byte_count:522994 replica_placement:100 version:3 modified_at_second:1614861258
+ volume id:254 size:1074830736 collection:"collection2" file_count:14140 delete_count:2 deleted_byte_count:105892 replica_placement:100 version:3 modified_at_second:1614861115
+ volume id:255 size:1079581640 collection:"collection2" file_count:14877 delete_count:3 deleted_byte_count:101223 replica_placement:100 version:3 modified_at_second:1614861233
+ volume id:256 size:1074283592 collection:"collection2" file_count:14157 delete_count:1 deleted_byte_count:18156 replica_placement:100 version:3 modified_at_second:1614861100
+ volume id:257 size:1082621720 collection:"collection2" file_count:18172 delete_count:2 deleted_byte_count:25125 replica_placement:100 version:3 modified_at_second:1614866402
+ volume id:258 size:1075527216 collection:"collection2" file_count:18421 delete_count:4 deleted_byte_count:267833 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:259 size:1075507848 collection:"collection2" file_count:18079 delete_count:2 deleted_byte_count:71992 replica_placement:100 version:3 modified_at_second:1614866381
+ volume id:260 size:1075105664 collection:"collection2" file_count:17316 delete_count:4 deleted_byte_count:2015310 replica_placement:100 version:3 modified_at_second:1614866226
+ volume id:261 size:1076628592 collection:"collection2" file_count:18355 delete_count:1 deleted_byte_count:1155 replica_placement:100 version:3 modified_at_second:1614866420
+ volume id:262 size:1078492584 collection:"collection2" file_count:20390 delete_count:3 deleted_byte_count:287601 replica_placement:100 version:3 modified_at_second:1614871601
+ volume id:264 size:1081624192 collection:"collection2" file_count:21151 replica_placement:100 version:3 modified_at_second:1614871629
+ volume id:265 size:1076401104 collection:"collection2" file_count:19932 delete_count:2 deleted_byte_count:160823 replica_placement:100 version:3 modified_at_second:1614871543
+ volume id:266 size:1075617552 collection:"collection2" file_count:20075 delete_count:1 deleted_byte_count:1039 replica_placement:100 version:3 modified_at_second:1614871526
+ volume id:267 size:1075699376 collection:"collection2" file_count:21039 delete_count:3 deleted_byte_count:59956 replica_placement:100 version:3 modified_at_second:1614877294
+ volume id:270 size:1076876424 collection:"collection2" file_count:22057 delete_count:1 deleted_byte_count:43916 replica_placement:100 version:3 modified_at_second:1614877469
+ volume id:271 size:1076992704 collection:"collection2" file_count:22640 delete_count:1 deleted_byte_count:30645 replica_placement:100 version:3 modified_at_second:1614877504
+ volume id:272 size:1076145912 collection:"collection2" file_count:21034 delete_count:2 deleted_byte_count:216564 replica_placement:100 version:3 modified_at_second:1614884139
+ volume id:273 size:1074873432 collection:"collection2" file_count:20511 delete_count:3 deleted_byte_count:46076 replica_placement:100 version:3 modified_at_second:1614884046
+ volume id:274 size:1075994184 collection:"collection2" file_count:20997 replica_placement:100 version:3 modified_at_second:1614884113
+ volume id:275 size:1078349024 collection:"collection2" file_count:20808 delete_count:1 deleted_byte_count:1118 replica_placement:100 version:3 modified_at_second:1614884147
+ volume id:276 size:1076899880 collection:"collection2" file_count:20190 delete_count:1 deleted_byte_count:8798 replica_placement:100 version:3 modified_at_second:1614884003
+ volume id:278 size:1078798632 collection:"collection2" file_count:20597 delete_count:5 deleted_byte_count:400060 replica_placement:100 version:3 modified_at_second:1614890292
+ volume id:280 size:1077432160 collection:"collection2" file_count:20286 delete_count:1 deleted_byte_count:879 replica_placement:100 version:3 modified_at_second:1614890262
+ volume id:281
size:1077581064 collection:"collection2" file_count:20206 delete_count:3 deleted_byte_count:143964 replica_placement:100 version:3 modified_at_second:1614890237 + volume id:282 size:1075232184 collection:"collection2" file_count:22659 delete_count:4 deleted_byte_count:67915 replica_placement:100 version:3 modified_at_second:1614897304 + volume id:283 size:1080178880 collection:"collection2" file_count:19462 delete_count:7 deleted_byte_count:660407 replica_placement:100 version:3 modified_at_second:1614896623 + volume id:286 size:1077464816 collection:"collection2" file_count:23905 delete_count:6 deleted_byte_count:630577 replica_placement:100 version:3 modified_at_second:1614897401 + volume id:287 size:1074590536 collection:"collection2" file_count:28163 delete_count:5 deleted_byte_count:35727 replica_placement:100 version:3 modified_at_second:1614904875 + volume id:288 size:1075406920 collection:"collection2" file_count:27243 delete_count:2 deleted_byte_count:51519 replica_placement:100 version:3 modified_at_second:1614904738 + volume id:289 size:1075284312 collection:"collection2" file_count:29342 delete_count:5 deleted_byte_count:100454 replica_placement:100 version:3 modified_at_second:1614904977 + volume id:290 size:1074723800 collection:"collection2" file_count:28340 delete_count:4 deleted_byte_count:199064 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:292 size:1092010672 collection:"collection2" file_count:26781 delete_count:5 deleted_byte_count:508910 replica_placement:100 version:3 modified_at_second:1614912325 + volume id:295 size:1074702320 collection:"collection2" file_count:24488 delete_count:3 deleted_byte_count:48555 replica_placement:100 version:3 modified_at_second:1614911929 + volume id:296 size:1077824056 collection:"collection2" file_count:26741 delete_count:4 deleted_byte_count:199906 replica_placement:100 version:3 modified_at_second:1614912301 + volume id:297 size:1080229176 collection:"collection2" file_count:23409 delete_count:5 deleted_byte_count:46268 replica_placement:100 version:3 modified_at_second:1614918481 + volume id:298 size:1075410024 collection:"collection2" file_count:23222 delete_count:2 deleted_byte_count:46110 replica_placement:100 version:3 modified_at_second:1614918474 + volume id:302 size:1077559640 collection:"collection2" file_count:23124 delete_count:7 deleted_byte_count:293111 replica_placement:100 version:3 modified_at_second:1614925500 + volume id:304 size:1081038944 collection:"collection2" file_count:24505 delete_count:2 deleted_byte_count:124447 replica_placement:100 version:3 modified_at_second:1614925569 + volume id:305 size:1074185376 collection:"collection2" file_count:22074 delete_count:5 deleted_byte_count:20221 replica_placement:100 version:3 modified_at_second:1614925312 + volume id:306 size:1074763952 collection:"collection2" file_count:22939 replica_placement:100 version:3 modified_at_second:1614925462 + volume id:307 size:1076567912 collection:"collection2" file_count:23377 delete_count:2 deleted_byte_count:25453 replica_placement:100 version:3 modified_at_second:1614931448 + volume id:308 size:1074022336 collection:"collection2" file_count:23086 delete_count:2 deleted_byte_count:2127 replica_placement:100 version:3 modified_at_second:1614931401 + volume id:311 size:1088248344 collection:"collection2" file_count:23553 delete_count:6 deleted_byte_count:191716 replica_placement:100 version:3 modified_at_second:1614931463 + volume id:312 size:1075037528 collection:"collection2" file_count:22524 
replica_placement:100 version:3 modified_at_second:1614937831 + volume id:313 size:1074875960 collection:"collection2" file_count:22404 delete_count:4 deleted_byte_count:51728 replica_placement:100 version:3 modified_at_second:1614937755 + volume id:316 size:1077720776 collection:"collection2" file_count:22605 delete_count:1 deleted_byte_count:8503 replica_placement:100 version:3 modified_at_second:1614937838 + volume id:318 size:1075965168 collection:"collection2" file_count:22459 delete_count:2 deleted_byte_count:37778 replica_placement:100 version:3 modified_at_second:1614943862 + volume id:322 size:1078471536 collection:"collection2" file_count:21905 delete_count:3 deleted_byte_count:145002 replica_placement:100 version:3 modified_at_second:1614950572 + volume id:323 size:1074608056 collection:"collection2" file_count:21605 delete_count:4 deleted_byte_count:172090 replica_placement:100 version:3 modified_at_second:1614950526 + volume id:325 size:1080701232 collection:"collection2" file_count:21735 replica_placement:100 version:3 modified_at_second:1614950525 + volume id:326 size:1076059920 collection:"collection2" file_count:22564 delete_count:2 deleted_byte_count:192886 replica_placement:100 version:3 modified_at_second:1614950619 + volume id:327 size:1076121304 collection:"collection2" file_count:22007 delete_count:3 deleted_byte_count:60358 replica_placement:100 version:3 modified_at_second:1614956487 + volume id:328 size:1074767816 collection:"collection2" file_count:21720 delete_count:3 deleted_byte_count:56429 replica_placement:100 version:3 modified_at_second:1614956362 + volume id:329 size:1076691960 collection:"collection2" file_count:22411 delete_count:5 deleted_byte_count:214092 replica_placement:100 version:3 modified_at_second:1614956485 + volume id:330 size:1080825760 collection:"collection2" file_count:22464 delete_count:2 deleted_byte_count:15771 replica_placement:100 version:3 modified_at_second:1614956476 + volume id:331 size:1074957256 collection:"collection2" file_count:21230 delete_count:4 deleted_byte_count:62145 replica_placement:100 version:3 modified_at_second:1614956259 + volume id:332 size:1075569928 collection:"collection2" file_count:22097 delete_count:3 deleted_byte_count:98273 replica_placement:100 version:3 modified_at_second:1614962869 + volume id:333 size:1074270160 collection:"collection2" file_count:21271 delete_count:2 deleted_byte_count:168122 replica_placement:100 version:3 modified_at_second:1614962697 + volume id:334 size:1075607880 collection:"collection2" file_count:22546 delete_count:6 deleted_byte_count:101538 replica_placement:100 version:3 modified_at_second:1614962978 + volume id:335 size:1076235136 collection:"collection2" file_count:22391 delete_count:3 deleted_byte_count:8838 replica_placement:100 version:3 modified_at_second:1614962970 + volume id:337 size:1075646896 collection:"collection2" file_count:21934 delete_count:1 deleted_byte_count:3397 replica_placement:100 version:3 modified_at_second:1614969937 + volume id:339 size:1078402392 collection:"collection2" file_count:22309 replica_placement:100 version:3 modified_at_second:1614969995 + volume id:340 size:1079462152 collection:"collection2" file_count:22319 delete_count:4 deleted_byte_count:93620 replica_placement:100 version:3 modified_at_second:1614969977 + volume id:341 size:1074448360 collection:"collection2" file_count:21590 delete_count:5 deleted_byte_count:160085 replica_placement:100 version:3 modified_at_second:1614969858 + volume id:343 size:1075345072 
collection:"collection2" file_count:21095 delete_count:2 deleted_byte_count:20581 replica_placement:100 version:3 modified_at_second:1614977148 + volume id:346 size:1076464112 collection:"collection2" file_count:22320 delete_count:4 deleted_byte_count:798258 replica_placement:100 version:3 modified_at_second:1614977511 + volume id:347 size:1075145248 collection:"collection2" file_count:22178 delete_count:1 deleted_byte_count:79392 replica_placement:100 version:3 modified_at_second:1614984727 + volume id:348 size:1080623544 collection:"collection2" file_count:21667 delete_count:1 deleted_byte_count:2443 replica_placement:100 version:3 modified_at_second:1614984604 + volume id:349 size:1075957672 collection:"collection2" file_count:22395 delete_count:2 deleted_byte_count:61565 replica_placement:100 version:3 modified_at_second:1614984748 + volume id:351 size:1078795120 collection:"collection2" file_count:23660 delete_count:3 deleted_byte_count:102141 replica_placement:100 version:3 modified_at_second:1614984816 + volume id:352 size:1077145936 collection:"collection2" file_count:22066 delete_count:1 deleted_byte_count:1018 replica_placement:100 version:3 modified_at_second:1614992130 + volume id:353 size:1074897496 collection:"collection2" file_count:21266 delete_count:2 deleted_byte_count:3105374 replica_placement:100 version:3 modified_at_second:1614991951 + volume id:354 size:1085214104 collection:"collection2" file_count:23150 delete_count:4 deleted_byte_count:82391 replica_placement:100 version:3 modified_at_second:1614992208 + volume id:357 size:1074276152 collection:"collection2" file_count:23137 delete_count:4 deleted_byte_count:188487 replica_placement:100 version:3 modified_at_second:1614998792 + volume id:359 size:1074211296 collection:"collection2" file_count:22437 delete_count:2 deleted_byte_count:187953 replica_placement:100 version:3 modified_at_second:1614998711 + volume id:360 size:1075532512 collection:"collection2" file_count:22574 delete_count:3 deleted_byte_count:1774776 replica_placement:100 version:3 modified_at_second:1614998770 + volume id:361 size:1075362744 collection:"collection2" file_count:22272 delete_count:1 deleted_byte_count:3497 replica_placement:100 version:3 modified_at_second:1614998668 + volume id:362 size:1074074176 collection:"collection2" file_count:20595 delete_count:1 deleted_byte_count:112145 replica_placement:100 version:3 modified_at_second:1615004407 + volume id:363 size:1078859640 collection:"collection2" file_count:23177 delete_count:4 deleted_byte_count:9601 replica_placement:100 version:3 modified_at_second:1615004823 + volume id:364 size:1081280880 collection:"collection2" file_count:22686 delete_count:1 deleted_byte_count:84375 replica_placement:100 version:3 modified_at_second:1615004813 + volume id:365 size:1075736632 collection:"collection2" file_count:22193 delete_count:5 deleted_byte_count:259033 replica_placement:100 version:3 modified_at_second:1615004776 + volume id:366 size:1075267272 collection:"collection2" file_count:21856 delete_count:5 deleted_byte_count:138363 replica_placement:100 version:3 modified_at_second:1615004703 + volume id:367 size:1076403648 collection:"collection2" file_count:22995 delete_count:2 deleted_byte_count:36955 replica_placement:100 version:3 modified_at_second:1615010985 + volume id:368 size:1074821960 collection:"collection2" file_count:22252 delete_count:4 deleted_byte_count:3291946 replica_placement:100 version:3 modified_at_second:1615010877 + volume id:369 size:1091472040 collection:"collection2" 
file_count:23709 delete_count:4 deleted_byte_count:400876 replica_placement:100 version:3 modified_at_second:1615011021 + volume id:370 size:1076040544 collection:"collection2" file_count:22092 delete_count:2 deleted_byte_count:115388 replica_placement:100 version:3 modified_at_second:1615010877 + volume id:371 size:1078806216 collection:"collection2" file_count:22685 delete_count:2 deleted_byte_count:68905 replica_placement:100 version:3 modified_at_second:1615010995 + volume id:372 size:1076193344 collection:"collection2" file_count:22774 delete_count:1 deleted_byte_count:3495 replica_placement:100 version:3 modified_at_second:1615016911 + volume id:373 size:1080928088 collection:"collection2" file_count:22617 delete_count:4 deleted_byte_count:91849 replica_placement:100 version:3 modified_at_second:1615016878 + volume id:374 size:1085011176 collection:"collection2" file_count:23054 delete_count:2 deleted_byte_count:89034 replica_placement:100 version:3 modified_at_second:1615016917 + volume id:376 size:1074845832 collection:"collection2" file_count:22908 delete_count:4 deleted_byte_count:432305 replica_placement:100 version:3 modified_at_second:1615016916 + volume id:377 size:957434264 collection:"collection2" file_count:14929 delete_count:1 deleted_byte_count:43099 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:379 size:1014108528 collection:"collection2" file_count:15362 delete_count:6 deleted_byte_count:2481613 replica_placement:100 version:3 modified_at_second:1615632323 + Disk hdd total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 + DataNode 192.168.1.5:8080 total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 + Rack DefaultRack total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 + DataCenter dc5 total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 +total size:775256653592 file_count:10478712 deleted_file:33754 deleted_bytes:10839266043 +` diff --git a/weed/shell/command_volume_mark.go b/weed/shell/command_volume_mark.go new file mode 100644 index 000000000..19b614310 --- /dev/null +++ b/weed/shell/command_volume_mark.go @@ -0,0 +1,55 @@ +package shell + +import ( + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandVolumeMark{}) +} + +type commandVolumeMark struct { +} + +func (c *commandVolumeMark) Name() string { + return "volume.mark" +} + +func (c *commandVolumeMark) Help() string { + return `Mark volume writable or readonly from one volume server + + volume.mark -node <volume server host:port> -volumeId <volume id> -writable or -readonly +` +} + +func (c *commandVolumeMark) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + volMarkCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := volMarkCommand.Int("volumeId", 0, "the volume id") + nodeStr := volMarkCommand.String("node", "", "the volume server <host>:<port>") + writable := volMarkCommand.Bool("writable", false, "volume mark writable") + readonly := volMarkCommand.Bool("readonly", false, "volume mark readonly") + if err = volMarkCommand.Parse(args); err != nil { + return nil + } + markWritable := false + if (*writable && *readonly) || (!*writable && !*readonly) { + return fmt.Errorf("use -readonly or -writable") + } else if *writable { + markWritable = true + 
} + + sourceVolumeServer := *nodeStr + + volumeId := needle.VolumeId(*volumeIdInt) + + return markVolumeWritable(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, markWritable) +} diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go index 50a307492..bd588d0b5 100644 --- a/weed/shell/command_volume_mount.go +++ b/weed/shell/command_volume_mount.go @@ -2,7 +2,7 @@ package shell import ( "context" - "fmt" + "flag" "io" "github.com/chrislusf/seaweedfs/weed/operation" @@ -25,7 +25,7 @@ func (c *commandVolumeMount) Name() string { func (c *commandVolumeMount) Help() string { return `mount a volume from one volume server - volume.mount <volume server host:port> <volume id> + volume.mount -node <volume server host:port> -volumeId <volume id> This command mounts a volume from one volume server. @@ -34,25 +34,28 @@ func (c *commandVolumeMount) Help() string { func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - if len(args) != 2 { - fmt.Fprintf(writer, "received args: %+v\n", args) - return fmt.Errorf("need 2 args of <volume server host:port> <volume id>") + if err = commandEnv.confirmIsLocked(); err != nil { + return } - sourceVolumeServer, volumeIdString := args[0], args[1] - volumeId, err := needle.NewVolumeId(volumeIdString) - if err != nil { - return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) + volMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := volMountCommand.Int("volumeId", 0, "the volume id") + nodeStr := volMountCommand.String("node", "", "the volume server <host>:<port>") + if err = volMountCommand.Parse(args); err != nil { + return nil } - ctx := context.Background() - return mountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + sourceVolumeServer := *nodeStr + + volumeId := needle.VolumeId(*volumeIdInt) + + return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } -func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { +func mountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) return mountErr diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go index 08d87c988..84f33db34 100644 --- a/weed/shell/command_volume_move.go +++ b/weed/shell/command_volume_move.go @@ -2,6 +2,7 @@ package shell import ( "context" + "flag" "fmt" "io" "log" @@ -25,9 +26,10 @@ func (c *commandVolumeMove) Name() string { } func (c *commandVolumeMove) Help() string { - return `<experimental> move a live volume from one volume server to another volume server + return `move a live volume from one volume server to another volume server - volume.move <source volume server host:port> <target volume server host:port> <volume id> + volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id> + volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id> -disk [hdd|ssd|<tag>] This 
command moves a live volume from one volume server to another volume server. Here are the steps: @@ -39,46 +41,53 @@ func (c *commandVolumeMove) Help() string { Now the master will mark this volume id as writable. 5. This command asks the source volume server to delete the source volume + The option "-disk [hdd|ssd|<tag>]" can be used to change the volume disk type. + ` } func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - if len(args) != 3 { - fmt.Fprintf(writer, "received args: %+v\n", args) - return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>") + if err = commandEnv.confirmIsLocked(); err != nil { + return } - sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2] - volumeId, err := needle.NewVolumeId(volumeIdString) - if err != nil { - return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) + volMoveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := volMoveCommand.Int("volumeId", 0, "the volume id") + sourceNodeStr := volMoveCommand.String("source", "", "the source volume server <host>:<port>") + targetNodeStr := volMoveCommand.String("target", "", "the target volume server <host>:<port>") + diskTypeStr := volMoveCommand.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag") + if err = volMoveCommand.Parse(args); err != nil { + return nil } + sourceVolumeServer, targetVolumeServer := *sourceNodeStr, *targetNodeStr + + volumeId := needle.VolumeId(*volumeIdInt) + if sourceVolumeServer == targetVolumeServer { return fmt.Errorf("source and target volume servers are the same") } - ctx := context.Background() - return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second) + return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second, *diskTypeStr) } // LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests.
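The refactoring below drops the shared context.Context parameter (each helper now creates a per-call context.Background()) and threads the new diskType argument through to the VolumeCopy request. A minimal sketch of a caller, assuming hypothetical server addresses, volume id, and disk type value; only the LiveMoveVolume signature is taken from this commit:

	package shell

	import (
		"time"

		"github.com/chrislusf/seaweedfs/weed/storage/needle"
		"google.golang.org/grpc"
	)

	// exampleLiveMove is illustrative only: move volume 42 between two made-up
	// volume servers as "ssd" storage, allowing 5 seconds of idle time while
	// tailing the writes that arrive during the copy.
	func exampleLiveMove(grpcDialOption grpc.DialOption) error {
		return LiveMoveVolume(grpcDialOption, needle.VolumeId(42),
			"192.168.1.4:8080", // source volume server (hypothetical)
			"192.168.1.5:8080", // target volume server (hypothetical)
			5*time.Second, "ssd")
	}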
-func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) { +func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration, diskType string) (err error) { log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer) - lastAppendAtNs, err := copyVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) + lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, diskType) if err != nil { return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err) } log.Printf("tailing volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer) - if err = tailVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil { + if err = tailVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil { return fmt.Errorf("tail volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err) } log.Printf("deleting volume %d from %s", volumeId, sourceVolumeServer) - if err = deleteVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer); err != nil { + if err = deleteVolume(grpcDialOption, volumeId, sourceVolumeServer); err != nil { return fmt.Errorf("delete volume %d from %s: %v", volumeId, sourceVolumeServer, err) } @@ -86,12 +95,50 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI return nil } -func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { +func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, diskType string) (lastAppendAtNs uint64, err error) { + + // Check whether the volume is already read-only. If it is not, mark it + // read-only for the duration of the copy, and undo that before returning. + var shouldMarkWritable bool + defer func() { + if !shouldMarkWritable { + return + } + + clientErr := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, writableErr := volumeServerClient.VolumeMarkWritable(context.Background(), &volume_server_pb.VolumeMarkWritableRequest{ + VolumeId: uint32(volumeId), + }) + return writableErr + }) + if clientErr != nil { + log.Printf("failed to mark volume %d as writable after copy from %s: %v", volumeId, sourceVolumeServer, clientErr) + } + }() + + err = operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, statusErr := volumeServerClient.VolumeStatus(context.Background(), &volume_server_pb.VolumeStatusRequest{ + VolumeId: uint32(volumeId), + }) + if statusErr == nil && !resp.IsReadOnly { + shouldMarkWritable = true + _, readonlyErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{ + VolumeId: uint32(volumeId), + }) + return readonlyErr + } + return statusErr + }) + if err != nil { + return + } err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient
volume_server_pb.VolumeServerClient) error { - resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ + resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{ VolumeId: uint32(volumeId), SourceDataNode: sourceVolumeServer, + DiskType: diskType, }) if replicateErr == nil { lastAppendAtNs = resp.LastAppendAtNs @@ -102,10 +149,10 @@ func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne return } -func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { +func tailVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{ + _, replicateErr := volumeServerClient.VolumeTailReceiver(context.Background(), &volume_server_pb.VolumeTailReceiverRequest{ VolumeId: uint32(volumeId), SinceNs: lastAppendAtNs, IdleTimeoutSeconds: uint32(idleTimeout.Seconds()), @@ -116,11 +163,26 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne } -func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { +func deleteVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{ + _, deleteErr := volumeServerClient.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{ VolumeId: uint32(volumeId), }) return deleteErr }) } + +func markVolumeWritable(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string, writable bool) (err error) { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + if writable { + _, err = volumeServerClient.VolumeMarkWritable(context.Background(), &volume_server_pb.VolumeMarkWritableRequest{ + VolumeId: uint32(volumeId), + }) + } else { + _, err = volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{ + VolumeId: uint32(volumeId), + }) + } + return err + }) +} diff --git a/weed/shell/command_volume_server_evacuate.go b/weed/shell/command_volume_server_evacuate.go new file mode 100644 index 000000000..f21d0334c --- /dev/null +++ b/weed/shell/command_volume_server_evacuate.go @@ -0,0 +1,221 @@ +package shell + +import ( + "flag" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "io" + "os" + "sort" +) + +func init() { + Commands = append(Commands, &commandVolumeServerEvacuate{}) +} + +type commandVolumeServerEvacuate struct { +} + +func (c 
*commandVolumeServerEvacuate) Name() string { + return "volumeServer.evacuate" +} + +func (c *commandVolumeServerEvacuate) Help() string { + return `move out all data on a volume server + + volumeServer.evacuate -node <host:port> + + This command moves all data away from the volume server. + Its volumes will be redistributed to the other volume servers. + + Usually this is used to prepare for shutting down or upgrading the volume server. + + Sometimes a volume cannot be moved because there is no + good destination that meets the replication requirement. + E.g. a volume with replication 001 in a cluster of only 2 volume servers cannot be moved. + You can use "-skipNonMoveable" to skip such volumes and move the rest. + +` +} + +func (c *commandVolumeServerEvacuate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + vsEvacuateCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeServer := vsEvacuateCommand.String("node", "", "<host>:<port> of the volume server") + skipNonMoveable := vsEvacuateCommand.Bool("skipNonMoveable", false, "skip volumes that cannot be moved") + applyChange := vsEvacuateCommand.Bool("force", false, "actually apply the changes") + if err = vsEvacuateCommand.Parse(args); err != nil { + return nil + } + + if *volumeServer == "" { + return fmt.Errorf("need to specify volume server by -node=<host>:<port>") + } + + return volumeServerEvacuate(commandEnv, *volumeServer, *skipNonMoveable, *applyChange, writer) + +} + +func volumeServerEvacuate(commandEnv *CommandEnv, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) (err error) { + // 1. confirm the volume server is part of the cluster + // 2. collect all other volume servers, sort by empty slots + // 3.
move to any other volume server as long as it satisfies the replication requirements + + // list all the volumes + // collect topology information + topologyInfo, _, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + + if err := evacuateNormalVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil { + return err + } + + if err := evacuateEcVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil { + return err + } + + return nil +} + +func evacuateNormalVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error { + // find this volume server + volumeServers := collectVolumeServersByDc(topologyInfo, "") + thisNode, otherNodes := nodesOtherThan(volumeServers, volumeServer) + if thisNode == nil { + return fmt.Errorf("%s is not found in this cluster", volumeServer) + } + + // move away normal volumes + volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo) + for _, diskInfo := range thisNode.info.DiskInfos { + for _, vol := range diskInfo.VolumeInfos { + hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange) + if err != nil { + return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err) + } + if !hasMoved { + if skipNonMoveable { + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement)) + fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String()) + } else { + return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer) + } + } + } + } + return nil +} + +func evacuateEcVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error { + // find this ec volume server + ecNodes, _ := collectEcVolumeServersByDc(topologyInfo, "") + thisNode, otherNodes := ecNodesOtherThan(ecNodes, volumeServer) + if thisNode == nil { + return fmt.Errorf("%s is not found in this cluster", volumeServer) + } + + // move away ec volumes + for _, diskInfo := range thisNode.info.DiskInfos { + for _, ecShardInfo := range diskInfo.EcShardInfos { + hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange) + if err != nil { + return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err) + } + if !hasMoved { + if skipNonMoveable { + fmt.Fprintf(writer, "skipping non moveable ec volume %d from %s\n", ecShardInfo.Id, volumeServer) + } else { + return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer) + } + } + } + } + return nil +} + +func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEcShardInformationMessage, thisNode *EcNode, otherNodes []*EcNode, applyChange bool) (hasMoved bool, err error) { + + for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() { + + sort.Slice(otherNodes, func(i, j int) bool { + return otherNodes[i].localShardIdCount(ecShardInfo.Id) < otherNodes[j].localShardIdCount(ecShardInfo.Id) + }) + + for i := 0; i < len(otherNodes); i++ { + emptyNode := otherNodes[i] + collectionPrefix := "" + if ecShardInfo.Collection != "" { + collectionPrefix = ecShardInfo.Collection + "_" + } + fmt.Fprintf(os.Stdout, "moving ec volume %s%d.%d %s => %s\n", collectionPrefix, ecShardInfo.Id, shardId, thisNode.info.Id,
emptyNode.info.Id) + err = moveMountedShardToEcNode(commandEnv, thisNode, ecShardInfo.Collection, needle.VolumeId(ecShardInfo.Id), shardId, emptyNode, applyChange) + if err != nil { + return + } else { + hasMoved = true + break + } + } + if !hasMoved { + return + } + } + + return +} + +func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) { + fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType)) + for _, n := range otherNodes { + n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { + return v.DiskType == vol.DiskType + }) + } + sort.Slice(otherNodes, func(i, j int) bool { + return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn) + }) + + for i := 0; i < len(otherNodes); i++ { + emptyNode := otherNodes[i] + hasMoved, err = maybeMoveOneVolume(commandEnv, volumeReplicas, thisNode, vol, emptyNode, applyChange) + if err != nil { + return + } + if hasMoved { + break + } + } + return +} + +func nodesOtherThan(volumeServers []*Node, thisServer string) (thisNode *Node, otherNodes []*Node) { + for _, node := range volumeServers { + if node.info.Id == thisServer { + thisNode = node + continue + } + otherNodes = append(otherNodes, node) + } + return +} + +func ecNodesOtherThan(volumeServers []*EcNode, thisServer string) (thisNode *EcNode, otherNodes []*EcNode) { + for _, node := range volumeServers { + if node.info.Id == thisServer { + thisNode = node + continue + } + otherNodes = append(otherNodes, node) + } + return +} diff --git a/weed/shell/command_volume_server_evacuate_test.go b/weed/shell/command_volume_server_evacuate_test.go new file mode 100644 index 000000000..5753af78b --- /dev/null +++ b/weed/shell/command_volume_server_evacuate_test.go @@ -0,0 +1,17 @@ +package shell + +import ( + "os" + "testing" +) + +func TestVolumeServerEvacuate(t *testing.T) { + topologyInfo := parseOutput(topoData) + + volumeServer := "192.168.1.4:8080" + + if err := evacuateNormalVolumes(nil, topologyInfo, volumeServer, true, false, os.Stdout); err != nil { + t.Errorf("evacuate: %v", err) + } + +} diff --git a/weed/shell/command_volume_server_leave.go b/weed/shell/command_volume_server_leave.go new file mode 100644 index 000000000..2a2e56e86 --- /dev/null +++ b/weed/shell/command_volume_server_leave.go @@ -0,0 +1,67 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "google.golang.org/grpc" + "io" +) + +func init() { + Commands = append(Commands, &commandVolumeServerLeave{}) +} + +type commandVolumeServerLeave struct { +} + +func (c *commandVolumeServerLeave) Name() string { + return "volumeServer.leave" +} + +func (c *commandVolumeServerLeave) Help() string { + return `stop a volume server from sending heartbeats to the master + + volumeServer.leave -node <volume server host:port> + + This command enables gracefully shutting down the volume server. + The volume server will stop sending heartbeats to the master. + After draining the traffic for a few seconds, you can safely shut down the volume server. + + This operation is not revocable unless the volume server is restarted.
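Taken together with volumeServer.evacuate above, this allows a clean decommission sequence: drain first, then stop heartbeats. A minimal sketch under the signatures introduced in this commit (volumeServerEvacuate and volumeServerLeave); the gracefulRetire helper itself is hypothetical and not part of the change:

	package shell

	import "io"

	// gracefulRetire drains a volume server and then stops its heartbeats,
	// the order the help texts recommend before a shutdown or upgrade.
	func gracefulRetire(commandEnv *CommandEnv, volumeServer string, writer io.Writer) error {
		// move normal and EC volumes to other servers
		// (skipNonMoveable=true, applyChange=true to actually apply)
		if err := volumeServerEvacuate(commandEnv, volumeServer, true, true, writer); err != nil {
			return err
		}
		// stop heartbeats so the master stops routing traffic to this server
		return volumeServerLeave(commandEnv.option.GrpcDialOption, volumeServer, writer)
	}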
+` +} + +func (c *commandVolumeServerLeave) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + vsLeaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeServer := vsLeaveCommand.String("node", "", "<host>:<port> of the volume server") + if err = vsLeaveCommand.Parse(args); err != nil { + return nil + } + + if *volumeServer == "" { + return fmt.Errorf("need to specify volume server by -node=<host>:<port>") + } + + return volumeServerLeave(commandEnv.option.GrpcDialOption, *volumeServer, writer) + +} + +func volumeServerLeave(grpcDialOption grpc.DialOption, volumeServer string, writer io.Writer) (err error) { + return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, leaveErr := volumeServerClient.VolumeServerLeave(context.Background(), &volume_server_pb.VolumeServerLeaveRequest{}) + if leaveErr != nil { + fmt.Fprintf(writer, "ask volume server %s to leave: %v\n", volumeServer, leaveErr) + } else { + fmt.Fprintf(writer, "stopped heartbeat in volume server %s. After a few seconds to drain traffic, it will be safe to stop the volume server.\n", volumeServer) + } + return leaveErr + }) +} diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go index 88e2e8b92..33166ce65 100644 --- a/weed/shell/command_volume_tier_download.go +++ b/weed/shell/command_volume_tier_download.go @@ -26,7 +26,7 @@ func (c *commandVolumeTierDownload) Name() string { } func (c *commandVolumeTierDownload) Help() string { - return `move the dat file of a volume to a remote tier + return `download the dat file of a volume from a remote tier volume.tier.download [-collection=""] volume.tier.download [-collection=""] -volumeId=<volume_id> @@ -42,6 +42,10 @@ func (c *commandVolumeTierDownload) Help() string { func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) volumeId := tierCommand.Int("volumeId", 0, "the volume id") collection := tierCommand.String("collection", "", "the collection name") @@ -49,18 +53,17 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // collect topology information - topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + topologyInfo, _, err := collectTopologyInfo(commandEnv) if err != nil { return err } // volumeId is provided if vid != 0 { - return doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid) + return doVolumeTierDownload(commandEnv, writer, *collection, vid) } // apply to all volumes in the collection @@ -71,7 +74,7 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr } fmt.Printf("tier download volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid); err != nil { + if err = doVolumeTierDownload(commandEnv, writer, *collection, vid); err != nil { return err } } @@ -83,9 +86,11 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s vidMap := make(map[uint32]bool) eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { - for _, v := range dn.VolumeInfos { - if v.Collection == 
selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" { - vidMap[v.Id] = true + for _, diskInfo := range dn.DiskInfos { + for _, v := range diskInfo.VolumeInfos { + if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" { + vidMap[v.Id] = true + } } } }) @@ -97,7 +102,7 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s return } -func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { +func doVolumeTierDownload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { @@ -107,7 +112,7 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io // TODO parallelize this for _, loc := range locations { // copy the .dat file from remote tier to local - err = downloadDatFromRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) + err = downloadDatFromRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) if err != nil { return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err) } @@ -116,10 +121,10 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io return nil } -func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { +func downloadDatFromRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ + stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -145,14 +150,14 @@ func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOpti return downloadErr } - _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ + _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) if unmountErr != nil { return unmountErr } - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) if mountErr != nil { diff --git a/weed/shell/command_volume_tier_move.go b/weed/shell/command_volume_tier_move.go new file mode 100644 index 000000000..d6a49d6e1 --- /dev/null +++ b/weed/shell/command_volume_tier_move.go @@ -0,0 +1,177 @@ +package shell + +import ( + "flag" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandVolumeTierMove{}) +} + +type commandVolumeTierMove struct 
{ +} + +func (c *commandVolumeTierMove) Name() string { + return "volume.tier.move" +} + +func (c *commandVolumeTierMove) Help() string { + return `change a volume from one disk type to another + + volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h] + + Even if the volume is replicated, only one replica will be changed and the remaining replicas will be dropped. + So "volume.fix.replication" and "volume.balance" should be run afterwards. + +` +} + +func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + collection := tierCommand.String("collection", "", "the collection name") + fullPercentage := tierCommand.Float64("fullPercent", 95, "select volumes that have reached this percentage of the max volume size") + quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period") + source := tierCommand.String("fromDiskType", "", "the source disk type") + target := tierCommand.String("toDiskType", "", "the target disk type") + applyChange := tierCommand.Bool("force", false, "actually apply the changes") + if err = tierCommand.Parse(args); err != nil { + return nil + } + + fromDiskType := types.ToDiskType(*source) + toDiskType := types.ToDiskType(*target) + + if fromDiskType == toDiskType { + return fmt.Errorf("source tier %s is the same as target tier %s", fromDiskType, toDiskType) + } + + // collect topology information + topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + + // collect all volumes that should change + volumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collection, *fullPercentage, *quietPeriod) + if err != nil { + return err + } + fmt.Printf("tier move volumes: %v\n", volumeIds) + + _, allLocations := collectVolumeReplicaLocations(topologyInfo) + for _, vid := range volumeIds { + if err = doVolumeTierMove(commandEnv, writer, *collection, vid, toDiskType, allLocations, *applyChange); err != nil { + fmt.Printf("tier move volume %d: %v\n", vid, err) + } + } + + return nil +} + +func isOneOf(server string, locations []wdclient.Location) bool { + for _, loc := range locations { + if server == loc.Url { + return true + } + } + return false +} + +func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location, applyChanges bool) (err error) { + // find volume location + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { + return fmt.Errorf("volume %d not found", vid) + } + + // find one server with the most empty volume slots with the target disk type + hasFoundTarget := false + keepDataNodesSorted(allLocations, toDiskType) + fn := capacityByFreeVolumeCount(toDiskType) + for _, dst := range allLocations { + if fn(dst.dataNode) > 0 && !hasFoundTarget { + // ask the volume server to replicate the volume + if isOneOf(dst.dataNode.Id, locations) { + continue + } + sourceVolumeServer := "" + for _, loc := range locations { + if loc.Url != dst.dataNode.Id { + sourceVolumeServer = loc.Url + } + } + if sourceVolumeServer == "" { + continue + } + fmt.Fprintf(writer, "moving volume %d from %s to %s with disk type %s ...\n", vid, sourceVolumeServer, dst.dataNode.Id, toDiskType.ReadableString()) + hasFoundTarget =
true + + if !applyChanges { + break + } + + // mark all replicas as read only + if err = markVolumeReadonly(commandEnv.option.GrpcDialOption, vid, locations); err != nil { + return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) + } + if err = LiveMoveVolume(commandEnv.option.GrpcDialOption, vid, sourceVolumeServer, dst.dataNode.Id, 5*time.Second, toDiskType.ReadableString()); err != nil { + return fmt.Errorf("move volume %d %s => %s : %v", vid, locations[0].Url, dst.dataNode.Id, err) + } + + // remove the remaining replicas + for _, loc := range locations { + if loc.Url != dst.dataNode.Id { + if err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.Url); err != nil { + fmt.Fprintf(writer, "failed to delete volume %d on %s\n", vid, loc.Url) + } + } + } + } + } + + if !hasFoundTarget { + fmt.Fprintf(writer, "can not find disk type %s for volume %d\n", toDiskType.ReadableString(), vid) + } + + return nil +} + +func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { + + quietSeconds := int64(quietPeriod / time.Second) + nowUnixSeconds := time.Now().Unix() + + fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds) + + vidMap := make(map[uint32]bool) + eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, diskInfo := range dn.DiskInfos { + for _, v := range diskInfo.VolumeInfos { + if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier { + if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 { + vidMap[v.Id] = true + } + } + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go index b3a0d9fe8..f92cdc3e4 100644 --- a/weed/shell/command_volume_tier_upload.go +++ b/weed/shell/command_volume_tier_upload.go @@ -26,7 +26,7 @@ func (c *commandVolumeTierUpload) Name() string { } func (c *commandVolumeTierUpload) Help() string { - return `move the dat file of a volume to a remote tier + return `upload the dat file of a volume to a remote tier volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h] volume.tier.upload [-collection=""] -volumeId=<volume_id> -dest=<storage_backend> [-keepLocalDatFile] @@ -56,6 +56,10 @@ func (c *commandVolumeTierUpload) Help() string { func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) volumeId := tierCommand.Int("volumeId", 0, "the volume id") collection := tierCommand.String("collection", "", "the collection name") @@ -67,23 +71,22 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // volumeId is provided if vid != 0 { - return doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile) + return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile) } // apply to all volumes in the collection // reusing collectVolumeIdsForEcEncode for now - volumeIds, 
err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod) + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) if err != nil { return err } fmt.Printf("tier upload volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil { + if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil { return err } } @@ -91,20 +94,20 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ return nil } -func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) { +func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { return fmt.Errorf("volume %d not found", vid) } - err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) if err != nil { return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // copy the .dat file to remote tier - err = uploadDatToRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile) + err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile) if err != nil { return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err) } @@ -112,10 +115,10 @@ func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.W return nil } -func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error { +func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error { err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{ + stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{ VolumeId: uint32(volumeId), Collection: collection, DestinationBackendName: dest, diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go index 8096f34d8..f7e5a501b 100644 --- a/weed/shell/command_volume_unmount.go +++ b/weed/shell/command_volume_unmount.go @@ -2,7 +2,7 @@ package shell import ( "context" - "fmt" + "flag" "io" "github.com/chrislusf/seaweedfs/weed/operation" @@ -25,7 +25,7 @@ func (c *commandVolumeUnmount) Name() string { func (c *commandVolumeUnmount) Help() string { return `unmount a volume from one volume server - volume.unmount <volume server host:port> <volume id> + volume.unmount -node <volume server host:port> -volumeId <volume id> This command unmounts 
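doVolumeTierUpload above is a three-step pipeline: locate the volume's servers, freeze the volume read-only, then ship the .dat file to the remote backend. A hedged sketch of that orchestration with the gRPC-backed steps stubbed out as parameters (every name here is illustrative):

package main

import "fmt"

func uploadVolumeToTier(vid uint32, dest string,
	lookup func(uint32) (server string, found bool),
	markReadonly func(vid uint32, server string) error,
	copyDat func(vid uint32, server, dest string) error) error {

	server, found := lookup(vid)
	if !found {
		return fmt.Errorf("volume %d not found", vid)
	}
	if err := markReadonly(vid, server); err != nil {
		return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, server, err)
	}
	if err := copyDat(vid, server, dest); err != nil {
		return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, server, dest, err)
	}
	return nil
}

func main() {
	// stubbed dependencies keep the sketch self-contained and runnable
	err := uploadVolumeToTier(7, "s3.default",
		func(uint32) (string, bool) { return "127.0.0.1:8080", true },
		func(uint32, string) error { return nil },
		func(uint32, string, string) error { return nil })
	fmt.Println(err) // <nil>
}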
a volume from one volume server. @@ -34,25 +34,28 @@ func (c *commandVolumeUnmount) Help() string { func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - if len(args) != 2 { - fmt.Fprintf(writer, "received args: %+v\n", args) - return fmt.Errorf("need 2 args of <volume server host:port> <volume id>") + if err = commandEnv.confirmIsLocked(); err != nil { + return } - sourceVolumeServer, volumeIdString := args[0], args[1] - volumeId, err := needle.NewVolumeId(volumeIdString) - if err != nil { - return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) + volUnmountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := volUnmountCommand.Int("volumeId", 0, "the volume id") + nodeStr := volUnmountCommand.String("node", "", "the volume server <host>:<port>") + if err = volUnmountCommand.Parse(args); err != nil { + return nil } - ctx := context.Background() - return unmountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + sourceVolumeServer := *nodeStr + + volumeId := needle.VolumeId(*volumeIdInt) + + return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } -func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { +func unmountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ + _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) return unmountErr diff --git a/weed/shell/command_volume_vacuum.go b/weed/shell/command_volume_vacuum.go new file mode 100644 index 000000000..56f85f4fe --- /dev/null +++ b/weed/shell/command_volume_vacuum.go @@ -0,0 +1,53 @@ +package shell + +import ( + "context" + "flag" + "io" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +func init() { + Commands = append(Commands, &commandVacuum{}) +} + +type commandVacuum struct { +} + +func (c *commandVacuum) Name() string { + return "volume.vacuum" +} + +func (c *commandVacuum) Help() string { + return `compact volumes if deleted entries are more than the limit + + volume.vacuum [-garbageThreshold=0.3] + +` +} + +func (c *commandVacuum) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + volumeVacuumCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + garbageThreshold := volumeVacuumCommand.Float64("garbageThreshold", 0.3, "vacuum when garbage is more than this limit") + if err = volumeVacuumCommand.Parse(args); err != nil { + return nil + } + + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err = client.VacuumVolume(context.Background(), &master_pb.VacuumVolumeRequest{ + GarbageThreshold: float32(*garbageThreshold), + }) + return err + }) + if err != nil { + return + } + + return nil +} diff --git a/weed/shell/commands.go b/weed/shell/commands.go index a6a0f7dec..0e285214b 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -1,19 +1,19 @@ package shell import ( - "context" "fmt" "io" "net/url" - "path/filepath" "strconv" "strings" "google.golang.org/grpc" - 
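The unmount command above trades positional arguments for a per-command FlagSet, the same parsing idiom the other shell commands in this change adopt: flag.ContinueOnError plus returning nil on a parse failure, so the shell prints usage instead of aborting. A self-contained sketch (command name and handler are examples):

package main

import (
	"flag"
	"fmt"
)

func runUnmountStyleCommand(name string, args []string) error {
	fs := flag.NewFlagSet(name, flag.ContinueOnError)
	volumeId := fs.Int("volumeId", 0, "the volume id")
	node := fs.String("node", "", "the volume server <host>:<port>")
	if err := fs.Parse(args); err != nil {
		return nil // usage was already printed by the FlagSet
	}
	fmt.Printf("would unmount volume %d on %s\n", *volumeId, *node)
	return nil
}

func main() {
	_ = runUnmountStyleCommand("volume.unmount",
		[]string{"-volumeId", "5", "-node", "127.0.0.1:8080"})
}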
"github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/wdclient/exclusive_locks" ) type ShellOptions struct { @@ -29,6 +29,7 @@ type CommandEnv struct { env map[string]string MasterClient *wdclient.MasterClient option ShellOptions + locker *exclusive_locks.ExclusiveLocker } type command interface { @@ -42,55 +43,67 @@ var ( ) func NewCommandEnv(options ShellOptions) *CommandEnv { - return &CommandEnv{ - env: make(map[string]string), - MasterClient: wdclient.NewMasterClient(context.Background(), - options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")), - option: options, + ce := &CommandEnv{ + env: make(map[string]string), + MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(*options.Masters, ",")), + option: options, } + ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient) + return ce } -func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int64, path string, err error) { +func (ce *CommandEnv) parseUrl(input string) (path string, err error) { if strings.HasPrefix(input, "http") { - return parseFilerUrl(input) + err = fmt.Errorf("http://<filer>:<port> prefix is not supported any more") + return } if !strings.HasPrefix(input, "/") { - input = filepath.ToSlash(filepath.Join(ce.option.Directory, input)) + input = util.Join(ce.option.Directory, input) } - return ce.option.FilerHost, ce.option.FilerPort, input, err + return input, err } -func (ce *CommandEnv) isDirectory(ctx context.Context, filerServer string, filerPort int64, path string) bool { +func (ce *CommandEnv) isDirectory(path string) bool { - return ce.checkDirectory(ctx, filerServer, filerPort, path) == nil + return ce.checkDirectory(path) == nil } -func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, filerPort int64, path string) error { +func (ce *CommandEnv) confirmIsLocked() error { - dir, name := filer2.FullPath(path).DirAndName() + if ce.locker.IsLocking() { + return nil + } - return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return fmt.Errorf("need to lock to continue") - resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir, - Name: name, - }) - if lookupErr != nil { - return lookupErr - } +} - if resp.Entry == nil { - return fmt.Errorf("entry not found") - } +func (ce *CommandEnv) checkDirectory(path string) error { - if !resp.Entry.IsDirectory { - return fmt.Errorf("not a directory") - } + dir, name := util.FullPath(path).DirAndName() - return nil - }) + exists, err := filer_pb.Exists(ce, dir, name, true) + + if !exists { + return fmt.Errorf("%s is not a directory", path) + } + + return err + +} + +var _ = filer_pb.FilerClient(&CommandEnv{}) + +func (ce *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress := fmt.Sprintf("%s:%d", ce.option.FilerHost, ce.option.FilerPort+10000) + return pb.WithGrpcFilerClient(filerGrpcAddress, ce.option.GrpcDialOption, fn) + +} +func (ce *CommandEnv) AdjustedUrl(location *filer_pb.Location) string { + return location.Url } func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { @@ -107,7 +120,7 @@ func parseFilerUrl(entryPath string) (filerServer 
string, filerPort int64, path } path = u.Path } else { - err = fmt.Errorf("path should have full url http://<filer_server>:<port>/path/to/dirOrFile : %s", entryPath) + err = fmt.Errorf("path should have full url /path/to/dirOrFile : %s", entryPath) } return } diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index a4f17e0fa..1dd611ca5 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -2,13 +2,13 @@ package shell import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util/grace" "io" "os" "path" "regexp" - "strings" - "sort" + "strings" "github.com/peterh/liner" ) @@ -20,8 +20,15 @@ var ( func RunShell(options ShellOptions) { + sort.Slice(Commands, func(i, j int) bool { + return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0 + }) + line = liner.NewLiner() defer line.Close() + grace.OnInterrupt(func() { + line.Close() + }) line.SetCtrlCAborts(true) @@ -46,51 +53,57 @@ func RunShell(options ShellOptions) { return } - cmds := reg.FindAllString(cmd, -1) - if len(cmds) == 0 { - continue - } else { - line.AppendHistory(cmd) + for _, c := range strings.Split(cmd, ";") { + if processEachCmd(reg, c, commandEnv) { + return + } + } + } +} - args := make([]string, len(cmds[1:])) +func processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool { + cmds := reg.FindAllString(cmd, -1) + if len(cmds) == 0 { + return false + } else { + line.AppendHistory(cmd) - for i := range args { - args[i] = strings.Trim(string(cmds[1+i]), "\"'") - } + args := make([]string, len(cmds[1:])) - cmd := strings.ToLower(cmds[0]) - if cmd == "help" || cmd == "?" { - printHelp(cmds) - } else if cmd == "exit" || cmd == "quit" { - return - } else { - foundCommand := false - for _, c := range Commands { - if c.Name() == cmd { - if err := c.Do(args, commandEnv, os.Stdout); err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - } - foundCommand = true + for i := range args { + args[i] = strings.Trim(string(cmds[1+i]), "\"'") + } + + cmd := cmds[0] + if cmd == "help" || cmd == "?" { + printHelp(cmds) + } else if cmd == "exit" || cmd == "quit" { + return true + } else { + foundCommand := false + for _, c := range Commands { + if c.Name() == cmd || c.Name() == "fs."+cmd { + if err := c.Do(args, commandEnv, os.Stdout); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) } - } - if !foundCommand { - fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd) + foundCommand = true } } - + if !foundCommand { + fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd) + } } + } + return false } func printGenericHelp() { msg := - `Type: "help <command>" for help on <command> + `Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options. 
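The dispatch rule above accepts either an exact command name or the name with an implicit "fs." prefix, after first splitting the input line on ";". A sketch of that resolution (the command list is made up):

package main

import (
	"fmt"
	"strings"
)

// resolveCommand: an exact name wins, otherwise retry with "fs." so
// typing "ls" finds "fs.ls".
func resolveCommand(names []string, cmd string) (string, bool) {
	for _, n := range names {
		if n == cmd || n == "fs."+cmd {
			return n, true
		}
	}
	return "", false
}

func main() {
	names := []string{"fs.ls", "volume.vacuum"}
	for _, in := range strings.Split("ls;volume.vacuum;nope", ";") {
		if n, ok := resolveCommand(names, in); ok {
			fmt.Println("run", n)
		} else {
			fmt.Println("unknown command:", in)
		}
	}
}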
` fmt.Print(msg) - sort.Slice(Commands, func(i, j int) bool { - return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0 - }) for _, c := range Commands { helpTexts := strings.SplitN(c.Help(), "\n", 2) fmt.Printf(" %-30s\t# %s \n", c.Name(), helpTexts[0]) @@ -106,10 +119,6 @@ func printHelp(cmds []string) { } else { cmd := strings.ToLower(args[0]) - sort.Slice(Commands, func(i, j int) bool { - return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0 - }) - for _, c := range Commands { if c.Name() == cmd { fmt.Printf(" %s\t# %s\n", c.Name(), c.Help()) diff --git a/weed/stats/disk.go b/weed/stats/disk.go index e9d8baedd..a8f906213 100644 --- a/weed/stats/disk.go +++ b/weed/stats/disk.go @@ -1,9 +1,15 @@ package stats -import "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +) func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) { disk = &volume_server_pb.DiskStatus{Dir: path} fillInDiskStatus(disk) + if disk.PercentUsed > 95 { + glog.V(0).Infof("disk status: %v", disk) + } return } diff --git a/weed/stats/disk_notsupported.go b/weed/stats/disk_notsupported.go index ace662f6a..3d99e6ce7 100644 --- a/weed/stats/disk_notsupported.go +++ b/weed/stats/disk_notsupported.go @@ -1,4 +1,4 @@ -// +build windows openbsd netbsd plan9 solaris +// +build openbsd netbsd plan9 solaris package stats diff --git a/weed/stats/disk_supported.go b/weed/stats/disk_supported.go index 0537828b0..dff580b5b 100644 --- a/weed/stats/disk_supported.go +++ b/weed/stats/disk_supported.go @@ -17,5 +17,7 @@ func fillInDiskStatus(disk *volume_server_pb.DiskStatus) { disk.All = fs.Blocks * uint64(fs.Bsize) disk.Free = fs.Bfree * uint64(fs.Bsize) disk.Used = disk.All - disk.Free + disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100) + disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100) return } diff --git a/weed/stats/disk_windows.go b/weed/stats/disk_windows.go new file mode 100644 index 000000000..3cfa52c0b --- /dev/null +++ b/weed/stats/disk_windows.go @@ -0,0 +1,47 @@ +package stats + +import ( + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "golang.org/x/sys/windows" + "syscall" + "unsafe" +) + +var ( + kernel32 = windows.NewLazySystemDLL("Kernel32.dll") + getDiskFreeSpaceEx = kernel32.NewProc("GetDiskFreeSpaceExW") +) + +func fillInDiskStatus(disk *volume_server_pb.DiskStatus) { + + ptr, err := syscall.UTF16PtrFromString(disk.Dir) + + if err != nil { + return + } + var _temp uint64 + /* #nosec */ + r, _, e := syscall.Syscall6( + getDiskFreeSpaceEx.Addr(), + 4, + uintptr(unsafe.Pointer(ptr)), + uintptr(unsafe.Pointer(&disk.Free)), + uintptr(unsafe.Pointer(&disk.All)), + uintptr(unsafe.Pointer(&_temp)), + 0, + 0, + ) + + if r == 0 { + if e != 0 { + return + } + + return + } + disk.Used = disk.All - disk.Free + disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100) + disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100) + + return +} diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go index a9624cd86..3f5d851a4 100644 --- a/weed/stats/metrics.go +++ b/weed/stats/metrics.go @@ -2,17 +2,21 @@ package stats import ( "fmt" + "log" + "net/http" "os" + "strings" "time" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" 
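The new PercentFree and PercentUsed fields above are straight ratios of the raw byte counters, computed in float64 and stored as float32 (the >95% case triggers the warning log in NewDiskStatus). The same arithmetic in isolation:

package main

import "fmt"

func percentages(all, free uint64) (percentFree, percentUsed float32) {
	if all == 0 {
		return 0, 0 // avoid division by zero on an empty status
	}
	used := all - free
	percentFree = float32(float64(free) / float64(all) * 100)
	percentUsed = float32(float64(used) / float64(all) * 100)
	return
}

func main() {
	pf, pu := percentages(1000, 40)
	fmt.Printf("free=%.1f%% used=%.1f%%\n", pf, pu) // free=4.0% used=96.0%
}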
"github.com/prometheus/client_golang/prometheus/push" + + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( - FilerGather = prometheus.NewRegistry() - VolumeServerGather = prometheus.NewRegistry() + Gather = prometheus.NewRegistry() FilerRequestCounter = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -73,6 +77,14 @@ var ( Help: "Number of volumes or shards.", }, []string{"collection", "type"}) + VolumeServerReadOnlyVolumeGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "SeaweedFS", + Subsystem: "volumeServer", + Name: "read_only_volumes", + Help: "Number of read only volumes.", + }, []string{"collection", "type"}) + VolumeServerMaxVolumeCounter = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: "SeaweedFS", @@ -88,55 +100,84 @@ var ( Name: "total_disk_size", Help: "Actual disk size used by volumes.", }, []string{"collection", "type"}) -) -func init() { + VolumeServerResourceGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "SeaweedFS", + Subsystem: "volumeServer", + Name: "resource", + Help: "Resource usage", + }, []string{"name", "type"}) - FilerGather.MustRegister(FilerRequestCounter) - FilerGather.MustRegister(FilerRequestHistogram) - FilerGather.MustRegister(FilerStoreCounter) - FilerGather.MustRegister(FilerStoreHistogram) - FilerGather.MustRegister(prometheus.NewGoCollector()) + S3RequestCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "SeaweedFS", + Subsystem: "s3", + Name: "request_total", + Help: "Counter of s3 requests.", + }, []string{"type", "code"}) + S3RequestHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "SeaweedFS", + Subsystem: "s3", + Name: "request_seconds", + Help: "Bucketed histogram of s3 request processing time.", + Buckets: prometheus.ExponentialBuckets(0.0001, 2, 24), + }, []string{"type"}) +) - VolumeServerGather.MustRegister(VolumeServerRequestCounter) - VolumeServerGather.MustRegister(VolumeServerRequestHistogram) - VolumeServerGather.MustRegister(VolumeServerVolumeCounter) - VolumeServerGather.MustRegister(VolumeServerMaxVolumeCounter) - VolumeServerGather.MustRegister(VolumeServerDiskSizeGauge) +func init() { + Gather.MustRegister(FilerRequestCounter) + Gather.MustRegister(FilerRequestHistogram) + Gather.MustRegister(FilerStoreCounter) + Gather.MustRegister(FilerStoreHistogram) + Gather.MustRegister(prometheus.NewGoCollector()) + + Gather.MustRegister(VolumeServerRequestCounter) + Gather.MustRegister(VolumeServerRequestHistogram) + Gather.MustRegister(VolumeServerVolumeCounter) + Gather.MustRegister(VolumeServerMaxVolumeCounter) + Gather.MustRegister(VolumeServerReadOnlyVolumeGauge) + Gather.MustRegister(VolumeServerDiskSizeGauge) + Gather.MustRegister(VolumeServerResourceGauge) + + Gather.MustRegister(S3RequestCounter) + Gather.MustRegister(S3RequestHistogram) } -func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, fnGetMetricsDest func() (addr string, intervalSeconds int)) { +func LoopPushingMetric(name, instance, addr string, intervalSeconds int) { - if fnGetMetricsDest == nil { + if addr == "" || intervalSeconds == 0 { return } - addr, intervalSeconds := fnGetMetricsDest() - pusher := push.New(addr, name).Gatherer(gatherer).Grouping("instance", instance) - currentAddr := addr + glog.V(0).Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds) + + pusher := push.New(addr, name).Gatherer(Gather).Grouping("instance", instance) for { - if currentAddr != "" { - err := pusher.Push() - if err != nil { 
- glog.V(0).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err) - } + err := pusher.Push() + if err != nil && !strings.HasPrefix(err.Error(), "unexpected status code 200") { + glog.V(0).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err) } if intervalSeconds <= 0 { intervalSeconds = 15 } time.Sleep(time.Duration(intervalSeconds) * time.Second) - addr, intervalSeconds = fnGetMetricsDest() - if currentAddr != addr { - pusher = push.New(addr, name).Gatherer(gatherer).Grouping("instance", instance) - currentAddr = addr - } } } -func SourceName(port int) string { +func StartMetricsServer(port int) { + if port == 0 { + return + } + http.Handle("/metrics", promhttp.HandlerFor(Gather, promhttp.HandlerOpts{})) + log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil)) +} + +func SourceName(port uint32) string { hostname, err := os.Hostname() if err != nil { return "unknown" diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index 6ea850543..2dc61d02e 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -1,6 +1,7 @@ package backend import ( + "github.com/chrislusf/seaweedfs/weed/util" "io" "os" "strings" @@ -9,7 +10,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/spf13/viper" ) type BackendStorageFile interface { @@ -19,6 +19,7 @@ type BackendStorageFile interface { io.Closer GetStat() (datSize int64, modTime time.Time, err error) Name() string + Sync() error } type BackendStorage interface { @@ -35,7 +36,7 @@ type StringProperties interface { type StorageType string type BackendStorageFactory interface { StorageType() StorageType - BuildStorage(configuration StringProperties, id string) (BackendStorage, error) + BuildStorage(configuration StringProperties, configPrefix string, id string) (BackendStorage, error) } var ( @@ -44,23 +45,24 @@ var ( ) // used by master to load remote storage configurations -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *util.ViperProxy) { StorageBackendPrefix := "storage.backend" - backendSub := config.Sub(StorageBackendPrefix) - for backendTypeName := range config.GetStringMap(StorageBackendPrefix) { backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)] if !found { glog.Fatalf("backend storage type %s not found", backendTypeName) } - backendTypeSub := backendSub.Sub(backendTypeName) - for backendStorageId := range backendSub.GetStringMap(backendTypeName) { - if !backendTypeSub.GetBool(backendStorageId + ".enabled") { + for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) { + if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." 
+ backendStorageId + ".enabled") { + continue + } + if _, found := BackendStorages[backendTypeName+"."+backendStorageId]; found { continue } - backendStorage, buildErr := backendStorageFactory.BuildStorage(backendTypeSub.Sub(backendStorageId), backendStorageId) + backendStorage, buildErr := backendStorageFactory.BuildStorage(config, + StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) if buildErr != nil { glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) } @@ -82,7 +84,10 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { glog.Warningf("storage type %s not found", storageBackend.Type) continue } - backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), storageBackend.Id) + if _, found := BackendStorages[storageBackend.Type+"."+storageBackend.Id]; found { + continue + } + backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) if buildErr != nil { glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) } diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go index c4b3caffb..3b42429cf 100644 --- a/weed/storage/backend/disk_file.go +++ b/weed/storage/backend/disk_file.go @@ -1,6 +1,8 @@ package backend import ( + "github.com/chrislusf/seaweedfs/weed/glog" + . "github.com/chrislusf/seaweedfs/weed/storage/types" "os" "time" ) @@ -12,12 +14,25 @@ var ( type DiskFile struct { File *os.File fullFilePath string + fileSize int64 + modTime time.Time } func NewDiskFile(f *os.File) *DiskFile { + stat, err := f.Stat() + if err != nil { + glog.Fatalf("stat file %s: %v", f.Name(), err) + } + offset := stat.Size() + if offset%NeedlePaddingSize != 0 { + offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize) + } + return &DiskFile{ fullFilePath: f.Name(), File: f, + fileSize: offset, + modTime: stat.ModTime(), } } @@ -26,11 +41,28 @@ func (df *DiskFile) ReadAt(p []byte, off int64) (n int, err error) { } func (df *DiskFile) WriteAt(p []byte, off int64) (n int, err error) { - return df.File.WriteAt(p, off) + n, err = df.File.WriteAt(p, off) + if err == nil { + waterMark := off + int64(n) + if waterMark > df.fileSize { + df.fileSize = waterMark + df.modTime = time.Now() + } + } + return +} + +func (df *DiskFile) Write(p []byte) (n int, err error) { + return df.WriteAt(p, df.fileSize) } func (df *DiskFile) Truncate(off int64) error { - return df.File.Truncate(off) + err := df.File.Truncate(off) + if err == nil { + df.fileSize = off + df.modTime = time.Now() + } + return err } func (df *DiskFile) Close() error { @@ -38,13 +70,13 @@ func (df *DiskFile) Close() error { } func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) { - stat, e := df.File.Stat() - if e == nil { - return stat.Size(), stat.ModTime(), nil - } - return 0, time.Time{}, err + return df.fileSize, df.modTime, nil } func (df *DiskFile) Name() string { return df.fullFilePath } + +func (df *DiskFile) Sync() error { + return df.File.Sync() +} diff --git a/weed/storage/backend/memory_map/memory_map_backend.go b/weed/storage/backend/memory_map/memory_map_backend.go index 03e7308d0..8ff03d9af 100644 --- a/weed/storage/backend/memory_map/memory_map_backend.go +++ b/weed/storage/backend/memory_map/memory_map_backend.go @@ -3,12 +3,10 @@ package memory_map import ( "os" "time" - - "github.com/chrislusf/seaweedfs/weed/storage/backend" ) var ( - _ 
backend.BackendStorageFile = &MemoryMappedFile{} +// _ backend.BackendStorageFile = &MemoryMappedFile{} // remove this to break import cycle ) type MemoryMappedFile struct { @@ -58,3 +56,7 @@ func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err er func (mmf *MemoryMappedFile) Name() string { return mmf.mm.File.Name() } + +func (mm *MemoryMappedFile) Sync() error { + return nil +} diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 9f03cfa81..4706c9334 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -26,8 +26,8 @@ type S3BackendFactory struct { func (factory *S3BackendFactory) StorageType() backend.StorageType { return backend.StorageType("s3") } -func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, id string) (backend.BackendStorage, error) { - return newS3BackendStorage(configuration, id) +func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) { + return newS3BackendStorage(configuration, configPrefix, id) } type S3BackendStorage struct { @@ -36,17 +36,20 @@ type S3BackendStorage struct { aws_secret_access_key string region string bucket string + endpoint string conn s3iface.S3API } -func newS3BackendStorage(configuration backend.StringProperties, id string) (s *S3BackendStorage, err error) { +func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) { s = &S3BackendStorage{} s.id = id - s.aws_access_key_id = configuration.GetString("aws_access_key_id") - s.aws_secret_access_key = configuration.GetString("aws_secret_access_key") - s.region = configuration.GetString("region") - s.bucket = configuration.GetString("bucket") - s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region) + s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id") + s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key") + s.region = configuration.GetString(configPrefix + "region") + s.bucket = configuration.GetString(configPrefix + "bucket") + s.endpoint = configuration.GetString(configPrefix + "endpoint") + + s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint) glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) return @@ -58,6 +61,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string { m["aws_secret_access_key"] = s.aws_secret_access_key m["region"] = s.region m["bucket"] = s.bucket + m["endpoint"] = s.endpoint return m } @@ -175,3 +179,7 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi func (s3backendStorageFile S3BackendStorageFile) Name() string { return s3backendStorageFile.key } + +func (s3backendStorageFile S3BackendStorageFile) Sync() error { + return nil +} diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go index 5fdbcb66b..b8378c379 100644 --- a/weed/storage/backend/s3_backend/s3_sessions.go +++ b/weed/storage/backend/s3_backend/s3_sessions.go @@ -24,7 +24,7 @@ func getSession(region string) (s3iface.S3API, bool) { return sess, found } -func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S3API, error) { +func createSession(awsAccessKeyId, awsSecretAccessKey, 
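With a configurable endpoint, the S3 backend above also forces path-style addressing so bucket names are not resolved as DNS subdomains on non-AWS, S3-compatible stores. A sketch of that session configuration (the endpoint value is an example):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func newS3Config(keyId, secret, region, endpoint string) *aws.Config {
	config := &aws.Config{
		Region:           aws.String(region),
		Endpoint:         aws.String(endpoint),
		S3ForcePathStyle: aws.Bool(true),
	}
	if keyId != "" && secret != "" {
		config.Credentials = credentials.NewStaticCredentials(keyId, secret, "")
	}
	return config
}

func main() {
	sess, err := session.NewSession(newS3Config("", "", "us-east-1", "http://127.0.0.1:9000"))
	fmt.Println(sess != nil, err)
}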
region, endpoint string) (s3iface.S3API, error) { sessionsLock.Lock() defer sessionsLock.Unlock() @@ -34,7 +34,9 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S } config := &aws.Config{ - Region: aws.String(region), + Region: aws.String(region), + Endpoint: aws.String(endpoint), + S3ForcePathStyle: aws.Bool(true), } if awsAccessKeyId != "" && awsSecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") diff --git a/weed/storage/backend/volume_create.go b/weed/storage/backend/volume_create.go new file mode 100644 index 000000000..d4bd8e40f --- /dev/null +++ b/weed/storage/backend/volume_create.go @@ -0,0 +1,20 @@ +// +build !linux,!windows + +package backend + +import ( + "os" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { + file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if e != nil { + return nil, e + } + if preallocate > 0 { + glog.V(2).Infof("Preallocated disk space for %s is not supported", fileName) + } + return NewDiskFile(file), nil +} diff --git a/weed/storage/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go index ee599ac32..260c2c2a3 100644 --- a/weed/storage/volume_create_linux.go +++ b/weed/storage/backend/volume_create_linux.go @@ -1,23 +1,22 @@ // +build linux -package storage +package backend import ( "os" "syscall" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage/backend" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { +func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if e != nil { return nil, e } if preallocate != 0 { syscall.Fallocate(int(file.Fd()), 1, 0, preallocate) - glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) + glog.V(1).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) } - return backend.NewDiskFile(file), nil + return NewDiskFile(file), nil } diff --git a/weed/storage/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go index e1c0b961f..7d40ec0d7 100644 --- a/weed/storage/volume_create_windows.go +++ b/weed/storage/backend/volume_create_windows.go @@ -1,17 +1,16 @@ // +build windows -package storage +package backend import ( "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" "golang.org/x/sys/windows" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { +func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { if preallocate > 0 { glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) } @@ -27,7 +26,7 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32 if e != nil { return nil, e } - return backend.NewDiskFile(file), nil + return NewDiskFile(file), nil } } diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index e116fc715..ed4e00312 100644 --- a/weed/storage/disk_location.go +++ 
b/weed/storage/disk_location.go @@ -1,45 +1,68 @@ package storage import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "io/ioutil" "os" + "path/filepath" "strings" "sync" - - "fmt" + "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) type DiskLocation struct { - Directory string - MaxVolumeCount int - volumes map[needle.VolumeId]*Volume - volumesLock sync.RWMutex + Directory string + IdxDirectory string + DiskType types.DiskType + MaxVolumeCount int + OriginalMaxVolumeCount int + MinFreeSpacePercent float32 + volumes map[needle.VolumeId]*Volume + volumesLock sync.RWMutex // erasure coding ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume ecVolumesLock sync.RWMutex + + isDiskSpaceLow bool } -func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation { - location := &DiskLocation{Directory: dir, MaxVolumeCount: maxVolumeCount} +func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32, idxDir string, diskType types.DiskType) *DiskLocation { + dir = util.ResolvePath(dir) + if idxDir == "" { + idxDir = dir + } else { + idxDir = util.ResolvePath(idxDir) + } + location := &DiskLocation{ + Directory: dir, + IdxDirectory: idxDir, + DiskType: diskType, + MaxVolumeCount: maxVolumeCount, + OriginalMaxVolumeCount: maxVolumeCount, + MinFreeSpacePercent: minFreeSpacePercent, + } location.volumes = make(map[needle.VolumeId]*Volume) location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume) + go location.CheckDiskSpace() return location } -func (l *DiskLocation) volumeIdFromPath(dir os.FileInfo) (needle.VolumeId, string, error) { - name := dir.Name() - if !dir.IsDir() && strings.HasSuffix(name, ".idx") { - base := name[:len(name)-len(".idx")] +func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) { + if isValidVolume(filename) { + base := filename[:len(filename)-4] collection, volumeId, err := parseCollectionVolumeId(base) return volumeId, collection, err } - return 0, "", fmt.Errorf("Path is not a volume: %s", name) + return 0, "", fmt.Errorf("file is not a volume: %s", filename) } func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) { @@ -51,38 +74,83 @@ func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeI return collection, vol, err } -func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) { - name := fileInfo.Name() - if !fileInfo.IsDir() && strings.HasSuffix(name, ".idx") { - vid, collection, err := l.volumeIdFromPath(fileInfo) - if err == nil { - l.volumesLock.RLock() - _, found := l.volumes[vid] - l.volumesLock.RUnlock() - if !found { - if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0); e == nil { - l.volumesLock.Lock() - l.volumes[vid] = v - l.volumesLock.Unlock() - size, _, _ := v.FileStat() - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", - l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) - // println("volume", vid, "last append at", v.lastAppendAtNs) - } else { - glog.V(0).Infof("new volume %s error %s", name, e) - } - } - } +func isValidVolume(basename string) bool { + return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif") +} + +func getValidVolumeName(basename string) string { + if 
isValidVolume(basename) { + return basename[:len(basename)-4] } + return "" } -func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) { +func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapKind) bool { + basename := fileInfo.Name() + if fileInfo.IsDir() { + return false + } + volumeName := getValidVolumeName(basename) + if volumeName == "" { + return false + } + + // check for incomplete volume + noteFile := l.Directory + "/" + volumeName + ".note" + if util.FileExists(noteFile) { + note, _ := ioutil.ReadFile(noteFile) + glog.Warningf("volume %s was not completed: %s", volumeName, string(note)) + removeVolumeFiles(l.Directory + "/" + volumeName) + removeVolumeFiles(l.IdxDirectory + "/" + volumeName) + return false + } + + // parse out collection, volume id + vid, collection, err := volumeIdFromFileName(basename) + if err != nil { + glog.Warningf("get volume id failed, %s, err : %s", volumeName, err) + return false + } + + // avoid loading one volume more than once + l.volumesLock.RLock() + _, found := l.volumes[vid] + l.volumesLock.RUnlock() + if found { + glog.V(1).Infof("loaded volume, %v", vid) + return true + } + + // load the volume + v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0) + if e != nil { + glog.V(0).Infof("new volume %s error %s", volumeName, e) + return false + } + + l.SetVolume(vid, v) + + size, _, _ := v.FileStat() + glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s", + l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) + return true +} + +func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) { task_queue := make(chan os.FileInfo, 10*concurrency) go func() { - if dirs, err := ioutil.ReadDir(l.Directory); err == nil { - for _, dir := range dirs { - task_queue <- dir + foundVolumeNames := make(map[string]bool) + if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { + for _, fi := range fileInfos { + volumeName := getValidVolumeName(fi.Name()) + if volumeName == "" { + continue + } + if _, found := foundVolumeNames[volumeName]; !found { + foundVolumeNames[volumeName] = true + task_queue <- fi + } } } close(task_queue) @@ -93,8 +161,8 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con wg.Add(1) go func() { defer wg.Done() - for dir := range task_queue { - l.loadExistingVolume(dir, needleMapKind) + for fi := range task_queue { + _ = l.loadExistingVolume(fi, needleMapKind) } }() } @@ -102,7 +170,7 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con } -func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { +func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind) { l.concurrentLoadingVolumes(needleMapKind, 10) glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount) @@ -158,7 +226,7 @@ func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e er return } -func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) { +func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) { v, ok := l.volumes[vid] if !ok { return @@ -167,21 +235,15 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) { if e != nil { return } + found = true delete(l.volumes, vid) return } -func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind 
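concurrentLoadingVolumes above is a classic bounded worker pool: one producer goroutine fills a buffered channel (deduplicating volume names first), N workers drain it, and a WaitGroup joins them. The shape in isolation, generic over strings for brevity:

package main

import (
	"fmt"
	"sync"
)

func loadAll(items []string, concurrency int, load func(string)) {
	queue := make(chan string, 10*concurrency)
	go func() {
		for _, it := range items {
			queue <- it
		}
		close(queue)
	}()

	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for it := range queue {
				load(it)
			}
		}()
	}
	wg.Wait()
}

func main() {
	loadAll([]string{"1.idx", "2.idx", "3.idx"}, 2, func(name string) {
		fmt.Println("loading", name)
	})
}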
NeedleMapType) bool { - if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { - for _, fileInfo := range fileInfos { - volId, _, err := l.volumeIdFromPath(fileInfo) - if vid == volId && err == nil { - l.loadExistingVolume(fileInfo, needleMapKind) - return true - } - } +func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool { + if fileInfo, found := l.LocateVolume(vid); found { + return l.loadExistingVolume(fileInfo, needleMapKind) } - return false } @@ -193,7 +255,8 @@ func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error { if !ok { return fmt.Errorf("Volume not found, VolumeId: %d", vid) } - return l.deleteVolumeById(vid) + _, err := l.deleteVolumeById(vid) + return err } func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error { @@ -217,7 +280,7 @@ func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[need } } - for k, _ := range deltaVols { + for k := range deltaVols { delete(l.volumes, k) } return deltaVols @@ -228,6 +291,7 @@ func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) { defer l.volumesLock.Unlock() l.volumes[vid] = volume + volume.location = l } func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) { @@ -260,3 +324,53 @@ func (l *DiskLocation) Close() { return } + +func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) { + if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { + for _, fileInfo := range fileInfos { + volId, _, err := volumeIdFromFileName(fileInfo.Name()) + if vid == volId && err == nil { + return fileInfo, true + } + } + } + + return nil, false +} + +func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) { + + l.volumesLock.RLock() + defer l.volumesLock.RUnlock() + + for _, vol := range l.volumes { + if vol.IsReadOnly() { + continue + } + datSize, idxSize, _ := vol.FileStat() + unUsedSpace += volumeSizeLimit - (datSize + idxSize) + } + + return +} + +func (l *DiskLocation) CheckDiskSpace() { + for { + if dir, e := filepath.Abs(l.Directory); e == nil { + s := stats.NewDiskStatus(dir) + stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All)) + stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used)) + stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free)) + if (s.PercentFree < l.MinFreeSpacePercent) != l.isDiskSpaceLow { + l.isDiskSpaceLow = !l.isDiskSpaceLow + } + if l.isDiskSpaceLow { + glog.V(0).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow) + } else { + glog.V(4).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow) + } + } + time.Sleep(time.Minute) + } + +} diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go index f6c44e966..91c7d86a6 100644 --- a/weed/storage/disk_location_ec.go +++ b/weed/storage/disk_location_ec.go @@ -3,6 +3,7 @@ package storage import ( "fmt" "io/ioutil" + "os" "path" "regexp" "sort" @@ -13,7 +14,7 @@ import ( ) var ( - re = regexp.MustCompile("\\.ec[0-9][0-9]") + re = regexp.MustCompile(`\.ec[0-9][0-9]`) ) func (l *DiskLocation) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) { @@ -56,15 +57,18 @@ func (l *DiskLocation) FindEcShard(vid needle.VolumeId, shardId erasure_coding.S func (l *DiskLocation) LoadEcShard(collection string, vid needle.VolumeId, shardId 
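CheckDiskSpace above implements a simple hysteresis: isDiskSpaceLow flips only when the comparison result changes, so the loud V(0) log level applies exactly while free space stays under the threshold, and the quiet V(4) level otherwise. A sketch of that toggle:

package main

import "fmt"

type spaceWatcher struct {
	minFreePercent float32
	isLow          bool
}

func (w *spaceWatcher) observe(percentFree float32) {
	if (percentFree < w.minFreePercent) != w.isLow {
		w.isLow = !w.isLow // state changed: entering or leaving low-space
	}
	if w.isLow {
		fmt.Printf("free %.2f%% < min %.2f%%, low disk space\n", percentFree, w.minFreePercent)
	}
}

func main() {
	w := &spaceWatcher{minFreePercent: 1}
	for _, p := range []float32{5, 0.5, 0.4, 3} {
		w.observe(p) // logs only for the two low readings
	}
}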
erasure_coding.ShardId) (err error) { - ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.Directory, collection, vid, shardId) + ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.DiskType, l.Directory, collection, vid, shardId) if err != nil { + if err == os.ErrNotExist { + return os.ErrNotExist + } return fmt.Errorf("failed to create ec shard %d.%d: %v", vid, shardId, err) } l.ecVolumesLock.Lock() defer l.ecVolumesLock.Unlock() ecVolume, found := l.ecVolumes[vid] if !found { - ecVolume, err = erasure_coding.NewEcVolume(l.Directory, collection, vid) + ecVolume, err = erasure_coding.NewEcVolume(l.DiskType, l.Directory, l.IdxDirectory, collection, vid) if err != nil { return fmt.Errorf("failed to create ec volume %d: %v", vid, err) } @@ -118,6 +122,13 @@ func (l *DiskLocation) loadAllEcShards() (err error) { if err != nil { return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err) } + if l.IdxDirectory != l.Directory { + indexFileInfos, err := ioutil.ReadDir(l.IdxDirectory) + if err != nil { + return fmt.Errorf("load all ec shards in dir %s: %v", l.IdxDirectory, err) + } + fileInfos = append(fileInfos, indexFileInfos...) + } sort.Slice(fileInfos, func(i, j int) bool { return fileInfos[i].Name() < fileInfos[j].Name() @@ -183,3 +194,10 @@ func (l *DiskLocation) unmountEcVolumeByCollection(collectionName string) map[ne } return deltaVols } + +func (l *DiskLocation) EcVolumesLen() int { + l.ecVolumesLock.RLock() + defer l.ecVolumesLock.RUnlock() + + return len(l.ecVolumes) +} diff --git a/weed/storage/erasure_coding/389.ecx b/weed/storage/erasure_coding/389.ecx Binary files differnew file mode 100644 index 000000000..158781920 --- /dev/null +++ b/weed/storage/erasure_coding/389.ecx diff --git a/weed/storage/erasure_coding/ec_decoder.go b/weed/storage/erasure_coding/ec_decoder.go index ae77cee3f..47d3c6550 100644 --- a/weed/storage/erasure_coding/ec_decoder.go +++ b/weed/storage/erasure_coding/ec_decoder.go @@ -11,6 +11,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) // write .idx file from .ecx and .ecj files @@ -44,20 +45,20 @@ func WriteIdxFileFromEcIndex(baseFileName string) (err error) { // FindDatFileSize calculate .dat file size from max offset entry // there may be extra deletions after that entry // but they are deletions anyway -func FindDatFileSize(baseFileName string) (datSize int64, err error) { +func FindDatFileSize(dataBaseFileName, indexBaseFileName string) (datSize int64, err error) { - version, err := readEcVolumeVersion(baseFileName) + version, err := readEcVolumeVersion(dataBaseFileName) if err != nil { - return 0, fmt.Errorf("read ec volume %s version: %v", baseFileName, err) + return 0, fmt.Errorf("read ec volume %s version: %v", dataBaseFileName, err) } - err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size uint32) error { + err = iterateEcxFile(indexBaseFileName, func(key types.NeedleId, offset types.Offset, size types.Size) error { - if size == types.TombstoneFileSize { + if size.IsDeleted() { return nil } - entryStopOffset := offset.ToAcutalOffset() + needle.GetActualSize(size, version) + entryStopOffset := offset.ToActualOffset() + needle.GetActualSize(size, version) if datSize < entryStopOffset { datSize = entryStopOffset } @@ -87,7 +88,7 @@ func readEcVolumeVersion(baseFileName string) (version needle.Version, err error } -func 
iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size uint32) error) error { +func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size types.Size) error) error { ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644) if openErr != nil { return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr) @@ -118,9 +119,12 @@ func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId } func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error { + if !util.FileExists(baseFileName + ".ecj") { + return nil + } ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644) if openErr != nil { - return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr) + return fmt.Errorf("cannot open ec index %s.ecj: %v", baseFileName, openErr) } defer ecjFile.Close() diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index eeb384b91..34b639407 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -5,12 +5,13 @@ import ( "io" "os" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/klauspost/reedsolomon" ) const ( @@ -25,9 +26,12 @@ const ( // all keys are sorted in ascending order func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) + if nm != nil { + defer nm.Close() + } if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) @@ -36,7 +40,7 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { } defer ecxFile.Close() - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { bytes := value.ToBytes() _, writeErr := ecxFile.Write(bytes) return writeErr @@ -73,6 +77,8 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, if err != nil { return fmt.Errorf("failed to stat dat file: %v", err) } + + glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size()) err = encodeDatFile(fi.Size(), err, baseFileName, bufferSize, largeBlockSize, file, smallBlockSize) if err != nil { return fmt.Errorf("encodeDatFile: %v", err) @@ -195,7 +201,7 @@ func encodeDatFile(remainingSize int64, err error, baseFileName string, bufferSi } buffers := make([][]byte, TotalShardsCount) - for i, _ := range buffers { + for i := range buffers { buffers[i] = make([]byte, bufferSize) } @@ -232,7 +238,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } buffers := make([][]byte, TotalShardsCount) - for i, _ := range buffers { + for i := range buffers { if shardHasData[i] { buffers[i] = make([]byte, ErasureCodingSmallBlockSize) } @@ -280,15 +286,15 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } -func readCompactMap(baseFileName string) (*needle_map.CompactMap, error) { +func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) { indexFile, 
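The encoder above stripes each volume into DataShardsCount data shards plus ParityShardsCount parity shards (10+4 in SeaweedFS) using klauspost/reedsolomon. A minimal round trip with that library, showing encode and verify on toy-sized shards:

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(10, 4) // 10 data + 4 parity shards
	if err != nil {
		panic(err)
	}
	shards := make([][]byte, 14)
	for i := range shards {
		shards[i] = make([]byte, 8) // all shards must be equal-sized
	}
	copy(shards[0], []byte("seaweed"))
	if err := enc.Encode(shards); err != nil { // fills the 4 parity shards
		panic(err)
	}
	ok, _ := enc.Verify(shards)
	fmt.Println("parity consistent:", ok) // true
}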
err := os.OpenFile(baseFileName+".idx", os.O_RDONLY, 0644) if err != nil { return nil, fmt.Errorf("cannot read Volume Index %s.idx: %v", baseFileName, err) } defer indexFile.Close() - cm := needle_map.NewCompactMap() - err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { + cm := needle_map.NewMemDb() + err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { if !offset.IsZero() && size != types.TombstoneFileSize { cm.Set(key, offset, size) } else { diff --git a/weed/storage/erasure_coding/ec_locate.go b/weed/storage/erasure_coding/ec_locate.go index 562966f8f..19eba6235 100644 --- a/weed/storage/erasure_coding/ec_locate.go +++ b/weed/storage/erasure_coding/ec_locate.go @@ -1,14 +1,18 @@ package erasure_coding +import ( + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + type Interval struct { BlockIndex int InnerBlockOffset int64 - Size uint32 + Size types.Size IsLargeBlock bool LargeBlockRowsCount int } -func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset int64, size uint32) (intervals []Interval) { +func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset int64, size types.Size) (intervals []Interval) { blockIndex, isLargeBlock, innerBlockOffset := locateOffset(largeBlockLength, smallBlockLength, datSize, offset) // adding DataShardsCount*smallBlockLength to ensure we can derive the number of large block size from a shard size @@ -32,7 +36,7 @@ func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset intervals = append(intervals, interval) return } - interval.Size = uint32(blockRemaining) + interval.Size = types.Size(blockRemaining) intervals = append(intervals, interval) size -= interval.Size diff --git a/weed/storage/erasure_coding/ec_shard.go b/weed/storage/erasure_coding/ec_shard.go index 47e6d3d1e..2a57d85ef 100644 --- a/weed/storage/erasure_coding/ec_shard.go +++ b/weed/storage/erasure_coding/ec_shard.go @@ -2,9 +2,11 @@ package erasure_coding import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "os" "path" "strconv" + "strings" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -19,21 +21,25 @@ type EcVolumeShard struct { dir string ecdFile *os.File ecdFileSize int64 + DiskType types.DiskType } -func NewEcVolumeShard(dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) { +func NewEcVolumeShard(diskType types.DiskType, dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) { - v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId} + v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId, DiskType: diskType} baseFileName := v.FileName() // open ecd file if v.ecdFile, e = os.OpenFile(baseFileName+ToExt(int(shardId)), os.O_RDONLY, 0644); e != nil { - return nil, fmt.Errorf("cannot read ec volume shard %s.%s: %v", baseFileName, ToExt(int(shardId)), e) + if e == os.ErrNotExist || strings.Contains(e.Error(), "no such file or directory") { + return nil, os.ErrNotExist + } + return nil, fmt.Errorf("cannot read ec volume shard %s%s: %v", baseFileName, ToExt(int(shardId)), e) } ecdFi, statErr := v.ecdFile.Stat() if statErr != nil { - return nil, fmt.Errorf("can not stat ec volume shard %s.%s: %v", baseFileName, ToExt(int(shardId)), statErr) + return nil, fmt.Errorf("can not stat ec volume shard 
%s%s: %v", baseFileName, ToExt(int(shardId)), statErr) } v.ecdFileSize = ecdFi.Size() diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go index 0e4aaa27c..0d48bec02 100644 --- a/weed/storage/erasure_coding/ec_test.go +++ b/weed/storage/erasure_coding/ec_test.go @@ -7,9 +7,10 @@ import ( "os" "testing" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/klauspost/reedsolomon" ) const ( @@ -41,9 +42,10 @@ func TestEncodingDecoding(t *testing.T) { } func validateFiles(baseFileName string) error { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) + defer nm.Close() if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0) @@ -60,7 +62,7 @@ func validateFiles(baseFileName string) error { ecFiles, err := openEcFiles(baseFileName, true) defer closeEcFiles(ecFiles) - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { return assertSame(datFile, fi.Size(), ecFiles, value.Offset, value.Size) }) if err != nil { @@ -69,7 +71,7 @@ func validateFiles(baseFileName string) error { return nil } -func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset types.Offset, size uint32) error { +func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset types.Offset, size types.Size) error { data, err := readDatFile(datFile, offset, size) if err != nil { @@ -88,10 +90,10 @@ func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset type return nil } -func readDatFile(datFile *os.File, offset types.Offset, size uint32) ([]byte, error) { +func readDatFile(datFile *os.File, offset types.Offset, size types.Size) ([]byte, error) { data := make([]byte, size) - n, err := datFile.ReadAt(data, offset.ToAcutalOffset()) + n, err := datFile.ReadAt(data, offset.ToActualOffset()) if err != nil { return nil, fmt.Errorf("failed to ReadAt dat file: %v", err) } @@ -101,9 +103,9 @@ func readDatFile(datFile *os.File, offset types.Offset, size uint32) ([]byte, er return data, nil } -func readEcFile(datSize int64, ecFiles []*os.File, offset types.Offset, size uint32) (data []byte, err error) { +func readEcFile(datSize int64, ecFiles []*os.File, offset types.Offset, size types.Size) (data []byte, err error) { - intervals := LocateData(largeBlockSize, smallBlockSize, datSize, offset.ToAcutalOffset(), size) + intervals := LocateData(largeBlockSize, smallBlockSize, datSize, offset.ToActualOffset(), size) for i, interval := range intervals { if d, e := readOneInterval(interval, ecFiles); e != nil { @@ -138,7 +140,7 @@ func readOneInterval(interval Interval, ecFiles []*os.File) (data []byte, err er return } -func readFromOtherEcFiles(ecFiles []*os.File, ecFileIndex int, ecFileOffset int64, size uint32) (data []byte, err error) { +func readFromOtherEcFiles(ecFiles []*os.File, ecFileIndex int, ecFileOffset int64, size types.Size) (data []byte, err error) { enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount) if err != nil { return nil, fmt.Errorf("failed to create encoder: %v", err) diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index 579f037fb..171db92a4 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go 
@@ -25,6 +25,7 @@ type EcVolume struct { VolumeId needle.VolumeId Collection string dir string + dirIdx string ecxFile *os.File ecxFileSize int64 ecxCreatedAt time.Time @@ -35,35 +36,37 @@ type EcVolume struct { Version needle.Version ecjFile *os.File ecjFileAccessLock sync.Mutex + diskType types.DiskType } -func NewEcVolume(dir string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) { - ev = &EcVolume{dir: dir, Collection: collection, VolumeId: vid} +func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) { + ev = &EcVolume{dir: dir, dirIdx: dirIdx, Collection: collection, VolumeId: vid, diskType: diskType} - baseFileName := EcShardFileName(collection, dir, int(vid)) + dataBaseFileName := EcShardFileName(collection, dir, int(vid)) + indexBaseFileName := EcShardFileName(collection, dirIdx, int(vid)) // open ecx file - if ev.ecxFile, err = os.OpenFile(baseFileName+".ecx", os.O_RDWR, 0644); err != nil { - return nil, fmt.Errorf("cannot open ec volume index %s.ecx: %v", baseFileName, err) + if ev.ecxFile, err = os.OpenFile(indexBaseFileName+".ecx", os.O_RDWR, 0644); err != nil { + return nil, fmt.Errorf("cannot open ec volume index %s.ecx: %v", indexBaseFileName, err) } ecxFi, statErr := ev.ecxFile.Stat() if statErr != nil { - return nil, fmt.Errorf("can not stat ec volume index %s.ecx: %v", baseFileName, statErr) + return nil, fmt.Errorf("can not stat ec volume index %s.ecx: %v", indexBaseFileName, statErr) } ev.ecxFileSize = ecxFi.Size() ev.ecxCreatedAt = ecxFi.ModTime() // open ecj file - if ev.ecjFile, err = os.OpenFile(baseFileName+".ecj", os.O_RDWR|os.O_CREATE, 0644); err != nil { - return nil, fmt.Errorf("cannot open ec volume journal %s.ecj: %v", baseFileName, err) + if ev.ecjFile, err = os.OpenFile(indexBaseFileName+".ecj", os.O_RDWR|os.O_CREATE, 0644); err != nil { + return nil, fmt.Errorf("cannot open ec volume journal %s.ecj: %v", indexBaseFileName, err) } // read volume info ev.Version = needle.Version3 - if volumeInfo, found := pb.MaybeLoadVolumeInfo(baseFileName + ".vif"); found { + if volumeInfo, _, found, _ := pb.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found { ev.Version = needle.Version(volumeInfo.Version) } else { - pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) + pb.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) } ev.ShardLocations = make(map[ShardId][]string) @@ -134,24 +137,42 @@ func (ev *EcVolume) Destroy() { for _, s := range ev.Shards { s.Destroy() } - os.Remove(ev.FileName() + ".ecx") - os.Remove(ev.FileName() + ".ecj") - os.Remove(ev.FileName() + ".vif") + os.Remove(ev.FileName(".ecx")) + os.Remove(ev.FileName(".ecj")) + os.Remove(ev.FileName(".vif")) } -func (ev *EcVolume) FileName() string { +func (ev *EcVolume) FileName(ext string) string { + switch ext { + case ".ecx", ".ecj": + return ev.IndexBaseFileName() + ext + } + // .vif + return ev.DataBaseFileName() + ext +} +func (ev *EcVolume) DataBaseFileName() string { return EcShardFileName(ev.Collection, ev.dir, int(ev.VolumeId)) +} +func (ev *EcVolume) IndexBaseFileName() string { + return EcShardFileName(ev.Collection, ev.dirIdx, int(ev.VolumeId)) } -func (ev *EcVolume) ShardSize() int64 { +func (ev *EcVolume) ShardSize() uint64 { if len(ev.Shards) > 0 { - return ev.Shards[0].Size() + return uint64(ev.Shards[0].Size()) } return 0 } +func (ev *EcVolume) Size() (size int64) { + for _, shard := range ev.Shards 
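The hunk above teaches EcVolume to keep its index files (.ecx, .ecj) in a separate dirIdx directory while the .vif volume info stays with the data directory, mirroring the FileName(ext) switch it introduces. A hedged sketch of that routing rule; shardBase is an illustrative stand-in for EcShardFileName, not the package's API:

package main

import (
	"fmt"
	"path/filepath"
)

// shardBase mimics EcShardFileName: dir/collection_volumeId.
func shardBase(dir, collection string, vid int) string {
	return filepath.Join(dir, fmt.Sprintf("%s_%d", collection, vid))
}

func fileFor(dataDir, idxDir, collection string, vid int, ext string) string {
	switch ext {
	case ".ecx", ".ecj": // index and journal follow the index directory
		return shardBase(idxDir, collection, vid) + ext
	default: // .vif (and shard data) stay with the data directory
		return shardBase(dataDir, collection, vid) + ext
	}
}

func main() {
	fmt.Println(fileFor("/data", "/ssd", "pics", 389, ".ecx")) // /ssd/pics_389.ecx
	fmt.Println(fileFor("/data", "/ssd", "pics", 389, ".vif")) // /data/pics_389.vif
}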
{ + size += shard.Size() + } + return +} + func (ev *EcVolume) CreatedAt() time.Time { return ev.ecxCreatedAt } @@ -171,6 +192,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V m = &master_pb.VolumeEcShardInformationMessage{ Id: uint32(s.VolumeId), Collection: s.Collection, + DiskType: string(ev.diskType), } messages = append(messages, m) } @@ -180,7 +202,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V return } -func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.Version) (offset types.Offset, size uint32, intervals []Interval, err error) { +func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.Version) (offset types.Offset, size types.Size, intervals []Interval, err error) { // find the needle from ecx file offset, size, err = ev.FindNeedleFromEcx(needleId) @@ -191,16 +213,16 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle. shard := ev.Shards[0] // calculate the locations in the ec shards - intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, version))) + intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version))) return } -func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size uint32, err error) { +func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size types.Size, err error) { return SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, nil) } -func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) { +func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size types.Size, err error) { var key types.NeedleId buf := make([]byte, types.NeedleMapEntrySize) l, h := int64(0), ecxFileSize/types.NeedleMapEntrySize diff --git a/weed/storage/erasure_coding/ec_volume_delete.go b/weed/storage/erasure_coding/ec_volume_delete.go index 822a9e923..a7f8c24a3 100644 --- a/weed/storage/erasure_coding/ec_volume_delete.go +++ b/weed/storage/erasure_coding/ec_volume_delete.go @@ -12,7 +12,7 @@ import ( var ( MarkNeedleDeleted = func(file *os.File, offset int64) error { b := make([]byte, types.SizeSize) - util.Uint32toBytes(b, types.TombstoneFileSize) + types.SizeToBytes(b, types.TombstoneFileSize) n, err := file.WriteAt(b, offset+types.NeedleIdSize+types.OffsetSize) if err != nil { return fmt.Errorf("sorted needle write error: %v", err) diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go index 8ff65bb0f..3dd535e64 100644 --- a/weed/storage/erasure_coding/ec_volume_info.go +++ b/weed/storage/erasure_coding/ec_volume_info.go @@ -10,13 +10,15 @@ type EcVolumeInfo struct { VolumeId needle.VolumeId Collection string ShardBits ShardBits + DiskType string } -func NewEcVolumeInfo(collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo { +func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo { return &EcVolumeInfo{ Collection: collection, 
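SearchNeedleFromSortedIndex above binary-searches the fixed-size records of a sorted .ecx file. A self-contained sketch of that search, assuming the default build's 16-byte entries with an 8-byte big-endian needle id up front (both are assumptions about the types package, not spelled out here):

package sketch

import (
	"encoding/binary"
	"io"
)

const entrySize = 16 // NeedleIdSize + OffsetSize + SizeSize in the default build

func searchSortedIndex(ecx io.ReaderAt, fileSize int64, target uint64) (recordOffset int64, found bool, err error) {
	buf := make([]byte, entrySize)
	lo, hi := int64(0), fileSize/entrySize
	for lo < hi {
		mid := (lo + hi) / 2
		if _, err = ecx.ReadAt(buf, mid*entrySize); err != nil {
			return 0, false, err
		}
		key := binary.BigEndian.Uint64(buf[:8]) // needle ids are stored big-endian
		switch {
		case key == target:
			return mid * entrySize, true, nil
		case key < target:
			lo = mid + 1
		default:
			hi = mid
		}
	}
	return 0, false, nil
}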
VolumeId: vid, ShardBits: shardBits, + DiskType: diskType, } } @@ -45,6 +47,7 @@ func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo { VolumeId: ecInfo.VolumeId, Collection: ecInfo.Collection, ShardBits: ecInfo.ShardBits.Minus(other.ShardBits), + DiskType: ecInfo.DiskType, } return ret @@ -55,6 +58,7 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb. Id: uint32(ecInfo.VolumeId), EcIndexBits: uint32(ecInfo.ShardBits), Collection: ecInfo.Collection, + DiskType: ecInfo.DiskType, } } diff --git a/weed/storage/erasure_coding/ec_volume_test.go b/weed/storage/erasure_coding/ec_volume_test.go new file mode 100644 index 000000000..747ef4aab --- /dev/null +++ b/weed/storage/erasure_coding/ec_volume_test.go @@ -0,0 +1,54 @@ +package erasure_coding + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func TestPositioning(t *testing.T) { + + ecxFile, err := os.OpenFile("389.ecx", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("failed to open ecx file: %v", err) + } + defer ecxFile.Close() + + stat, _ := ecxFile.Stat() + fileSize := stat.Size() + + tests := []struct { + needleId string + offset int64 + size int + }{ + {needleId: "0f0edb92", offset: 31300679656, size: 1167}, + {needleId: "0ef7d7f8", offset: 11513014944, size: 66044}, + } + + for _, test := range tests { + needleId, _ := types.ParseNeedleId(test.needleId) + offset, size, err := SearchNeedleFromSortedIndex(ecxFile, fileSize, needleId, nil) + assert.Equal(t, nil, err, "SearchNeedleFromSortedIndex") + fmt.Printf("offset: %d size: %d\n", offset.ToActualOffset(), size) + } + + needleId, _ := types.ParseNeedleId("0f087622") + offset, size, err := SearchNeedleFromSortedIndex(ecxFile, fileSize, needleId, nil) + assert.Equal(t, nil, err, "SearchNeedleFromSortedIndex") + fmt.Printf("offset: %d size: %d\n", offset.ToActualOffset(), size) + + var shardEcdFileSize int64 = 1118830592 // sample .ecd shard size, roughly 1 GiB + intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shardEcdFileSize, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, needle.CurrentVersion))) + + for _, interval := range intervals { + shardId, shardOffset := interval.ToShardIdAndOffset(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize) + fmt.Printf("interval: %+v, shardId: %d, shardOffset: %d\n", interval, shardId, shardOffset) + } + +} diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go index 90efb75e6..5215d3c4f 100644 --- a/weed/storage/idx/walk.go +++ b/weed/storage/idx/walk.go @@ -2,25 +2,26 @@ package idx import ( "io" - "os" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" ) // walks through the index file, calls fn function with each key, offset, size // stops with the error returned by the fn function -func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error { +func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offset, size types.Size) error) error { var readerOffset int64 bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead) count, e := r.ReadAt(bytes, readerOffset) - glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e) + if count == 0 && e == io.EOF { + return nil + } + glog.V(3).Infof("readerOffset %d count %d err: 
%v", readerOffset, count, e) readerOffset += int64(count) var ( key types.NeedleId offset types.Offset - size uint32 + size types.Size i int ) @@ -35,16 +36,16 @@ func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset, return nil } count, e = r.ReadAt(bytes, readerOffset) - glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e) + glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) readerOffset += int64(count) } return e } -func IdxFileEntry(bytes []byte) (key types.NeedleId, offset types.Offset, size uint32) { +func IdxFileEntry(bytes []byte) (key types.NeedleId, offset types.Offset, size types.Size) { key = types.BytesToNeedleId(bytes[:types.NeedleIdSize]) offset = types.BytesToOffset(bytes[types.NeedleIdSize : types.NeedleIdSize+types.OffsetSize]) - size = util.BytesToUint32(bytes[types.NeedleIdSize+types.OffsetSize : types.NeedleIdSize+types.OffsetSize+types.SizeSize]) + size = types.BytesToSize(bytes[types.NeedleIdSize+types.OffsetSize : types.NeedleIdSize+types.OffsetSize+types.SizeSize]) return } diff --git a/weed/storage/needle/async_request.go b/weed/storage/needle/async_request.go new file mode 100644 index 000000000..ea02c55c5 --- /dev/null +++ b/weed/storage/needle/async_request.go @@ -0,0 +1,53 @@ +package needle + +type AsyncRequest struct { + N *Needle + IsWriteRequest bool + ActualSize int64 + offset uint64 + size uint64 + doneChan chan interface{} + isUnchanged bool + err error +} + +func NewAsyncRequest(n *Needle, isWriteRequest bool) *AsyncRequest { + return &AsyncRequest{ + offset: 0, + size: 0, + ActualSize: 0, + doneChan: make(chan interface{}), + N: n, + isUnchanged: false, + IsWriteRequest: isWriteRequest, + err: nil, + } +} + +func (r *AsyncRequest) WaitComplete() (uint64, uint64, bool, error) { + <-r.doneChan + return r.offset, r.size, r.isUnchanged, r.err +} + +func (r *AsyncRequest) Complete(offset uint64, size uint64, isUnchanged bool, err error) { + r.offset = offset + r.size = size + r.isUnchanged = isUnchanged + r.err = err + close(r.doneChan) +} + +func (r *AsyncRequest) UpdateResult(offset uint64, size uint64, isUnchanged bool, err error) { + r.offset = offset + r.size = size + r.isUnchanged = isUnchanged + r.err = err +} + +func (r *AsyncRequest) Submit() { + close(r.doneChan) +} + +func (r *AsyncRequest) IsSucceed() bool { + return r.err == nil +} diff --git a/weed/storage/needle/crc.go b/weed/storage/needle/crc.go index 00ea1db69..4476631c2 100644 --- a/weed/storage/needle/crc.go +++ b/weed/storage/needle/crc.go @@ -1,11 +1,12 @@ package needle import ( - "crypto/md5" "fmt" + "io" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/klauspost/crc32" + + "github.com/chrislusf/seaweedfs/weed/util" ) var table = crc32.MakeTable(crc32.Castagnoli) @@ -30,12 +31,24 @@ func (n *Needle) Etag() string { return fmt.Sprintf("%x", bits) } -func (n *Needle) MD5() string { +func NewCRCwriter(w io.Writer) *CRCwriter { - hash := md5.New() + return &CRCwriter{ + crc: CRC(0), + w: w, + } - hash.Write(n.Data) +} - return fmt.Sprintf("%x", hash.Sum(nil)) +type CRCwriter struct { + crc CRC + w io.Writer +} +func (c *CRCwriter) Write(p []byte) (n int, err error) { + n, err = c.w.Write(p) // with each write ... 
+ c.crc = c.crc.Update(p) + return } + +func (c *CRCwriter) Sum() uint32 { return c.crc.Value() } // final hash diff --git a/weed/storage/needle/file_id.go b/weed/storage/needle/file_id.go index 5dabb0f25..6055bdd1c 100644 --- a/weed/storage/needle/file_id.go +++ b/weed/storage/needle/file_id.go @@ -66,7 +66,7 @@ func formatNeedleIdCookie(key NeedleId, cookie Cookie) string { NeedleIdToBytes(bytes[0:NeedleIdSize], key) CookieToBytes(bytes[NeedleIdSize:NeedleIdSize+CookieSize], cookie) nonzero_index := 0 - for ; bytes[nonzero_index] == 0; nonzero_index++ { + for ; bytes[nonzero_index] == 0 && nonzero_index < NeedleIdSize; nonzero_index++ { } return hex.EncodeToString(bytes[nonzero_index:]) } diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index 2f03ba87b..34d29ab6e 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -8,8 +8,6 @@ import ( "strings" "time" - "io/ioutil" - "github.com/chrislusf/seaweedfs/weed/images" . "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -26,7 +24,7 @@ const ( type Needle struct { Cookie Cookie `comment:"random number to mitigate brute force lookups"` Id NeedleId `comment:"needle id"` - Size uint32 `comment:"sum of DataSize,Data,NameSize,Name,MimeSize,Mime"` + Size Size `comment:"sum of DataSize,Data,NameSize,Name,MimeSize,Mime"` DataSize uint32 `comment:"Data size"` //version2 Data []byte `comment:"The actual file data"` @@ -46,57 +44,33 @@ type Needle struct { } func (n *Needle) String() (str string) { - str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime) + str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s Compressed:%v", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime, n.IsCompressed()) return } -func ParseUpload(r *http.Request) ( - fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int, - modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { - pairMap = make(map[string]string) - for k, v := range r.Header { - if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { - pairMap[k] = v[0] - } - } - - if r.Method == "POST" { - fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r) - } else { - isGzipped = false - mimeType = r.Header.Get("Content-Type") - fileName = "" - data, e = ioutil.ReadAll(r.Body) - originalDataSize = len(data) - } - if e != nil { - return - } - - modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) - ttl, _ = ReadTTL(r.FormValue("ttl")) - - return -} -func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle, originalSize int, e error) { - var pairMap map[string]string - fname, mimeType, isGzipped, isChunkedFile := "", "", false, false +func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, contentMd5 string, e error) { n = new(Needle) - fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) + pu, e := ParseUpload(r, sizeLimit) if e != nil { return } - if len(fname) < 256 { - n.Name = []byte(fname) + n.Data = pu.Data + originalSize = pu.OriginalDataSize + n.LastModified = pu.ModifiedTime + n.Ttl = pu.Ttl + contentMd5 = pu.ContentMd5 + + if len(pu.FileName) < 256 { + n.Name = []byte(pu.FileName) n.SetHasName() } - if len(mimeType) < 256 { - n.Mime = []byte(mimeType) + if len(pu.MimeType) < 256 { + n.Mime = 
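The new CRCwriter wraps any io.Writer and folds each written chunk into a running Castagnoli CRC, so a caller can stream a payload once and read the checksum afterwards. A usage sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func main() {
	var buf bytes.Buffer
	w := needle.NewCRCwriter(&buf) // every Write updates the CRC as a side effect
	if _, err := io.Copy(w, strings.NewReader("hello seaweedfs")); err != nil {
		panic(err)
	}
	fmt.Printf("crc32c=%08x bytes=%d\n", w.Sum(), buf.Len())
}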
[]byte(pu.MimeType) n.SetHasMime() } - if len(pairMap) != 0 { + if len(pu.PairMap) != 0 { trimmedPairMap := make(map[string]string) - for k, v := range pairMap { + for k, v := range pu.PairMap { trimmedPairMap[k[len(PairNamePrefix):]] = v } @@ -107,8 +81,9 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle n.SetHasPairs() } } - if isGzipped { - n.SetGzipped() + if pu.IsGzipped { + // println(r.URL.Path, "is set to compressed", pu.FileName, pu.IsGzipped, "dataSize", pu.OriginalDataSize) + n.SetIsCompressed() } if n.LastModified == 0 { n.LastModified = uint64(time.Now().Unix()) @@ -118,13 +93,13 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle n.SetHasTtl() } - if isChunkedFile { + if pu.IsChunkedFile { n.SetIsChunkManifest() } if fixJpgOrientation { - loweredName := strings.ToLower(fname) - if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") { + loweredName := strings.ToLower(pu.FileName) + if pu.MimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") { n.Data = images.FixJpgOrientation(n.Data) } } diff --git a/weed/storage/needle/needle_parse_multipart.go b/weed/storage/needle/needle_parse_multipart.go deleted file mode 100644 index 8be1a1da4..000000000 --- a/weed/storage/needle/needle_parse_multipart.go +++ /dev/null @@ -1,109 +0,0 @@ -package needle - -import ( - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" - - "io" - "io/ioutil" - "mime" - "net/http" - "path" - "strconv" - "strings" -) - -func parseMultipart(r *http.Request) ( - fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { - defer func() { - if e != nil && r.Body != nil { - io.Copy(ioutil.Discard, r.Body) - r.Body.Close() - } - }() - form, fe := r.MultipartReader() - if fe != nil { - glog.V(0).Infoln("MultipartReader [ERROR]", fe) - e = fe - return - } - - //first multi-part item - part, fe := form.NextPart() - if fe != nil { - glog.V(0).Infoln("Reading Multi part [ERROR]", fe) - e = fe - return - } - - fileName = part.FileName() - if fileName != "" { - fileName = path.Base(fileName) - } - - data, e = ioutil.ReadAll(part) - if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) - return - } - - //if the filename is empty string, do a search on the other multi-part items - for fileName == "" { - part2, fe := form.NextPart() - if fe != nil { - break // no more or on error, just safely break - } - - fName := part2.FileName() - - //found the first <file type> multi-part has filename - if fName != "" { - data2, fe2 := ioutil.ReadAll(part2) - if fe2 != nil { - glog.V(0).Infoln("Reading Content [ERROR]", fe2) - e = fe2 - return - } - - //update - data = data2 - fileName = path.Base(fName) - break - } - } - - originalDataSize = len(data) - - isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) - - if !isChunkedFile { - - dotIndex := strings.LastIndex(fileName, ".") - ext, mtype := "", "" - if dotIndex > 0 { - ext = strings.ToLower(fileName[dotIndex:]) - mtype = mime.TypeByExtension(ext) - } - contentType := part.Header.Get("Content-Type") - if contentType != "" && mtype != contentType { - mimeType = contentType //only return mime type if not deductable - mtype = contentType - } - - if part.Header.Get("Content-Encoding") == "gzip" { - if unzipped, e := util.UnGzipData(data); e == nil { - originalDataSize = len(unzipped) - } - isGzipped = true 
- } else if util.IsGzippable(ext, mtype, data) { - if compressedData, err := util.GzipData(data); err == nil { - if len(data) > len(compressedData) { - data = compressedData - isGzipped = true - } - } - } - - return -} diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go new file mode 100644 index 000000000..7201503f1 --- /dev/null +++ b/weed/storage/needle/needle_parse_upload.go @@ -0,0 +1,201 @@ +package needle + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type ParsedUpload struct { + FileName string + Data []byte + MimeType string + PairMap map[string]string + IsGzipped bool + // IsZstd bool + OriginalDataSize int + ModifiedTime uint64 + Ttl *TTL + IsChunkedFile bool + UncompressedData []byte + ContentMd5 string +} + +func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) { + pu = &ParsedUpload{} + pu.PairMap = make(map[string]string) + for k, v := range r.Header { + if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { + pu.PairMap[k] = v[0] + } + } + + if r.Method == "POST" { + e = parseMultipart(r, sizeLimit, pu) + } else { + e = parsePut(r, sizeLimit, pu) + } + if e != nil { + return + } + + pu.ModifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) + pu.Ttl, _ = ReadTTL(r.FormValue("ttl")) + + pu.OriginalDataSize = len(pu.Data) + pu.UncompressedData = pu.Data + // println("received data", len(pu.Data), "isGzipped", pu.IsGzipped, "mime", pu.MimeType, "name", pu.FileName) + if pu.IsGzipped { + if unzipped, e := util.DecompressData(pu.Data); e == nil { + pu.OriginalDataSize = len(unzipped) + pu.UncompressedData = unzipped + // println("ungzipped data size", len(unzipped)) + } + } else { + ext := filepath.Ext(pu.FileName) + mimeType := pu.MimeType + if mimeType == "" { + mimeType = http.DetectContentType(pu.Data) + } + // println("detected mimetype to", pu.MimeType) + if mimeType == "application/octet-stream" { + mimeType = "" + } + if shouldBeCompressed, iAmSure := util.IsCompressableFileType(ext, mimeType); mimeType == "" && !iAmSure || shouldBeCompressed && iAmSure { + // println("ext", ext, "iAmSure", iAmSure, "shouldBeCompressed", shouldBeCompressed, "mimeType", pu.MimeType) + if compressedData, err := util.GzipData(pu.Data); err == nil { + if len(compressedData)*10 < len(pu.Data)*9 { + pu.Data = compressedData + pu.IsGzipped = true + } + // println("gzipped data size", len(compressedData)) + } + } + } + + // md5 + h := md5.New() + h.Write(pu.UncompressedData) + pu.ContentMd5 = base64.StdEncoding.EncodeToString(h.Sum(nil)) + if expectedChecksum := r.Header.Get("Content-MD5"); expectedChecksum != "" { + if expectedChecksum != pu.ContentMd5 { + e = fmt.Errorf("Content-MD5 did not match md5 of file data: expected [%s] received [%s] size %d", expectedChecksum, pu.ContentMd5, len(pu.UncompressedData)) + return + } + } + + return +} + +func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { + pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip" + // pu.IsZstd = r.Header.Get("Content-Encoding") == "zstd" + pu.MimeType = r.Header.Get("Content-Type") + pu.FileName = "" + pu.Data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) + if e == io.EOF || int64(len(pu.Data)) == sizeLimit+1 { + io.Copy(ioutil.Discard, r.Body) + } + r.Body.Close() + return e +} + +func 
parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { + defer func() { + if e != nil && r.Body != nil { + io.Copy(ioutil.Discard, r.Body) + r.Body.Close() + } + }() + form, fe := r.MultipartReader() + if fe != nil { + glog.V(0).Infoln("MultipartReader [ERROR]", fe) + e = fe + return + } + + // first multi-part item + part, fe := form.NextPart() + if fe != nil { + glog.V(0).Infoln("Reading Multi part [ERROR]", fe) + e = fe + return + } + + pu.FileName = part.FileName() + if pu.FileName != "" { + pu.FileName = path.Base(pu.FileName) + } + + pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) + if e != nil { + glog.V(0).Infoln("Reading Content [ERROR]", e) + return + } + if len(pu.Data) == int(sizeLimit)+1 { + e = fmt.Errorf("file exceeds the limit of %d bytes", sizeLimit) + return + } + + // if the filename is empty, search the remaining multi-part items for one + for pu.FileName == "" { + part2, fe := form.NextPart() + if fe != nil { + break // no more or on error, just safely break + } + + fName := part2.FileName() + + // use the first multi-part item that has a filename + if fName != "" { + data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1)) + if fe2 != nil { + glog.V(0).Infoln("Reading Content [ERROR]", fe2) + e = fe2 + return + } + if len(data2) == int(sizeLimit)+1 { + e = fmt.Errorf("file exceeds the limit of %d bytes", sizeLimit) + return + } + + // update + pu.Data = data2 + pu.FileName = path.Base(fName) + break + } + } + + pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) + + if !pu.IsChunkedFile { + + dotIndex := strings.LastIndex(pu.FileName, ".") + ext, mtype := "", "" + if dotIndex > 0 { + ext = strings.ToLower(pu.FileName[dotIndex:]) + mtype = mime.TypeByExtension(ext) + } + contentType := part.Header.Get("Content-Type") + if contentType != "" && contentType != "application/octet-stream" && mtype != contentType { + pu.MimeType = contentType // only keep the content type if it cannot be deduced from the extension + mtype = contentType + } + + } + pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip" + // pu.IsZstd = part.Header.Get("Content-Encoding") == "zstd" + + return +} diff --git a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go index 7f8aa4823..16c2fd06b 100644 --- a/weed/storage/needle/needle_read_write.go +++ b/weed/storage/needle/needle_read_write.go @@ -3,17 +3,16 @@ package needle import ( "errors" "fmt" - "io" - "math" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" . 
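Two conventions in ParseUpload above deserve emphasis: a gzipped body is kept only when it saves at least 10% (the len(compressed)*10 < len(data)*9 test), and the Content-MD5 header is compared against the base64 of the raw MD5 digest of the uncompressed data, as in RFC 1864. A sketch of both checks, reusing this repository's util.GzipData:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	original := []byte("example upload body, repeated enough to be compressible...")
	data := original
	// keep the compressed form only if it is at least 10% smaller
	if compressed, err := util.GzipData(original); err == nil && len(compressed)*10 < len(original)*9 {
		data = compressed
	}
	fmt.Println("stored bytes:", len(data))

	sum := md5.Sum(original) // digest of the *uncompressed* body
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(sum[:]))
}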
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" + "io" + "math" ) const ( - FlagGzip = 0x01 + FlagIsCompressed = 0x01 FlagHasName = 0x02 FlagHasMime = 0x04 FlagHasLastModifiedDate = 0x08 @@ -24,11 +23,13 @@ const ( TtlBytesLength = 2 ) +var ErrorSizeMismatch = errors.New("size mismatch") + func (n *Needle) DiskSize(version Version) int64 { return GetActualSize(n.Size, version) } -func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, error) { +func (n *Needle) prepareWriteBuffer(version Version) ([]byte, Size, int64, error) { writeBytes := make([]byte, 0) @@ -37,8 +38,8 @@ func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, err header := make([]byte, NeedleHeaderSize) CookieToBytes(header[0:CookieSize], n.Cookie) NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id) - n.Size = uint32(len(n.Data)) - util.Uint32toBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size) + n.Size = Size(len(n.Data)) + SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size) size := n.Size actualSize := NeedleHeaderSize + int64(n.Size) writeBytes = append(writeBytes, header...) @@ -58,12 +59,12 @@ func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, err } n.DataSize, n.MimeSize = uint32(len(n.Data)), uint8(len(n.Mime)) if n.DataSize > 0 { - n.Size = 4 + n.DataSize + 1 + n.Size = 4 + Size(n.DataSize) + 1 if n.HasName() { - n.Size = n.Size + 1 + uint32(n.NameSize) + n.Size = n.Size + 1 + Size(n.NameSize) } if n.HasMime() { - n.Size = n.Size + 1 + uint32(n.MimeSize) + n.Size = n.Size + 1 + Size(n.MimeSize) } if n.HasLastModifiedDate() { n.Size = n.Size + LastModifiedBytesLength @@ -72,12 +73,12 @@ func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, err n.Size = n.Size + TtlBytesLength } if n.HasPairs() { - n.Size += 2 + uint32(n.PairsSize) + n.Size += 2 + Size(n.PairsSize) } } else { n.Size = 0 } - util.Uint32toBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size) + SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size) writeBytes = append(writeBytes, header[0:NeedleHeaderSize]...) if n.DataSize > 0 { util.Uint32toBytes(header[0:4], n.DataSize) @@ -119,13 +120,13 @@ func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, err writeBytes = append(writeBytes, header[0:NeedleChecksumSize+TimestampSize+padding]...) } - return writeBytes, n.DataSize, GetActualSize(n.Size, version), nil + return writeBytes, Size(n.DataSize), GetActualSize(n.Size, version), nil } return writeBytes, 0, 0, fmt.Errorf("Unsupported Version! 
(%d)", version) } -func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset uint64, size uint32, actualSize int64, err error) { +func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset uint64, size Size, actualSize int64, err error) { if end, _, e := w.GetStat(); e == nil { defer func(w backend.BackendStorageFile, off int64) { @@ -140,6 +141,10 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u err = fmt.Errorf("Cannot Read Current Volume Position: %v", e) return } + if offset >= MaxPossibleVolumeSize { + err = fmt.Errorf("Volume Size %d Exeededs %d", offset, MaxPossibleVolumeSize) + return + } bytesToWrite, size, actualSize, err := n.prepareWriteBuffer(version) @@ -150,21 +155,63 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u return offset, size, actualSize, err } -func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size uint32, version Version) (dataSlice []byte, err error) { +func WriteNeedleBlob(w backend.BackendStorageFile, dataSlice []byte, size Size, appendAtNs uint64, version Version) (offset uint64, err error) { + + if end, _, e := w.GetStat(); e == nil { + defer func(w backend.BackendStorageFile, off int64) { + if err != nil { + if te := w.Truncate(end); te != nil { + glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) + } + } + }(w, end) + offset = uint64(end) + } else { + err = fmt.Errorf("Cannot Read Current Volume Position: %v", e) + return + } + + if version == Version3 { + tsOffset := NeedleHeaderSize + size + NeedleChecksumSize + util.Uint64toBytes(dataSlice[tsOffset:tsOffset+TimestampSize], appendAtNs) + } + + if err == nil { + _, err = w.WriteAt(dataSlice, int64(offset)) + } + + return + +} + +func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size Size, version Version) (dataSlice []byte, err error) { dataSize := GetActualSize(size, version) dataSlice = make([]byte, int(dataSize)) - _, err = r.ReadAt(dataSlice, offset) + var n int + n, err = r.ReadAt(dataSlice, offset) + if err != nil && int64(n) == dataSize { + err = nil + } + if err != nil { + fileSize, _, _ := r.GetStat() + println("n", n, "dataSize", dataSize, "offset", offset, "fileSize", fileSize) + } return dataSlice, err } // ReadBytes hydrates the needle from the bytes buffer, with only n.Id is set. -func (n *Needle) ReadBytes(bytes []byte, offset int64, size uint32, version Version) (err error) { +func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Version) (err error) { n.ParseNeedleHeader(bytes) if n.Size != size { - return fmt.Errorf("entry not found: offset %d found id %d size %d, expected size %d", offset, n.Id, n.Size, size) + // cookie is not always passed in for this API. Use size to do preliminary checking. + if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) { + glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) + return ErrorSizeMismatch + } + return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) } switch version { case Version1: @@ -191,7 +238,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size uint32, version Vers } // ReadData hydrates the needle from the file, with only n.Id is set. 
-func (n *Needle) ReadData(r backend.BackendStorageFile, offset int64, size uint32, version Version) (err error) { +func (n *Needle) ReadData(r backend.BackendStorageFile, offset int64, size Size, version Version) (err error) { bytes, err := ReadNeedleBlob(r, offset, size, version) if err != nil { return err @@ -202,7 +249,7 @@ func (n *Needle) ReadData(r backend.BackendStorageFile, offset int64, size uint3 func (n *Needle) ParseNeedleHeader(bytes []byte) { n.Cookie = BytesToCookie(bytes[0:CookieSize]) n.Id = BytesToNeedleId(bytes[CookieSize : CookieSize+NeedleIdSize]) - n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleHeaderSize]) + n.Size = BytesToSize(bytes[CookieSize+NeedleIdSize : NeedleHeaderSize]) } func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { @@ -284,7 +331,7 @@ func ReadNeedleHeader(r backend.BackendStorageFile, version Version, offset int6 return } -func PaddingLength(needleSize uint32, version Version) uint32 { +func PaddingLength(needleSize Size, version Version) Size { if version == Version3 { // this is same value as version2, but just listed here for clarity return NeedlePaddingSize - ((NeedleHeaderSize + needleSize + NeedleChecksumSize + TimestampSize) % NeedlePaddingSize) @@ -292,7 +339,7 @@ func PaddingLength(needleSize uint32, version Version) uint32 { return NeedlePaddingSize - ((NeedleHeaderSize + needleSize + NeedleChecksumSize) % NeedlePaddingSize) } -func NeedleBodyLength(needleSize uint32, version Version) int64 { +func NeedleBodyLength(needleSize Size, version Version) int64 { if version == Version3 { return int64(needleSize) + NeedleChecksumSize + TimestampSize + int64(PaddingLength(needleSize, version)) } @@ -339,11 +386,11 @@ func (n *Needle) ReadNeedleBodyBytes(needleBody []byte, version Version) (err er return } -func (n *Needle) IsGzipped() bool { - return n.Flags&FlagGzip > 0 +func (n *Needle) IsCompressed() bool { + return n.Flags&FlagIsCompressed > 0 } -func (n *Needle) SetGzipped() { - n.Flags = n.Flags | FlagGzip +func (n *Needle) SetIsCompressed() { + n.Flags = n.Flags | FlagIsCompressed } func (n *Needle) HasName() bool { return n.Flags&FlagHasName > 0 @@ -386,6 +433,6 @@ func (n *Needle) SetHasPairs() { n.Flags = n.Flags | FlagHasPairs } -func GetActualSize(size uint32, version Version) int64 { +func GetActualSize(size Size, version Version) int64 { return NeedleHeaderSize + NeedleBodyLength(size, version) } diff --git a/weed/storage/needle/needle_read_write_test.go b/weed/storage/needle/needle_read_write_test.go index 47582dd26..afcea5a05 100644 --- a/weed/storage/needle/needle_read_write_test.go +++ b/weed/storage/needle/needle_read_write_test.go @@ -48,7 +48,7 @@ func TestAppend(t *testing.T) { int64 : -9223372036854775808 to 9223372036854775807 */ - fileSize := int64(4294967295) + 10000 + fileSize := int64(4294967296) + 10000 tempFile.Truncate(fileSize) defer func() { tempFile.Close() diff --git a/weed/storage/needle/volume_ttl.go b/weed/storage/needle/volume_ttl.go index 4a169870d..d0de3768e 100644 --- a/weed/storage/needle/volume_ttl.go +++ b/weed/storage/needle/volume_ttl.go @@ -1,11 +1,12 @@ package needle import ( + "fmt" "strconv" ) const ( - //stored unit types + // stored unit types Empty byte = iota Minute Hour @@ -69,6 +70,9 @@ func (t *TTL) ToBytes(output []byte) { } func (t *TTL) ToUint32() (output uint32) { + if t == nil || t.Count == 0 { + return 0 + } output = uint32(t.Count) << 8 output += uint32(t.Unit) return output @@ -130,9 +134,49 @@ func (t TTL) Minutes() uint32 { case Week: return 
uint32(t.Count) * 60 * 24 * 7 case Month: - return uint32(t.Count) * 60 * 24 * 31 + return uint32(t.Count) * 60 * 24 * 30 case Year: return uint32(t.Count) * 60 * 24 * 365 } return 0 } + +func SecondsToTTL(seconds int32) string { + if seconds == 0 { + return "" + } + if seconds%(3600*24*365) == 0 && seconds/(3600*24*365) < 256 { + return fmt.Sprintf("%dy", seconds/(3600*24*365)) + } + if seconds%(3600*24*30) == 0 && seconds/(3600*24*30) < 256 { + return fmt.Sprintf("%dM", seconds/(3600*24*30)) + } + if seconds%(3600*24*7) == 0 && seconds/(3600*24*7) < 256 { + return fmt.Sprintf("%dw", seconds/(3600*24*7)) + } + if seconds%(3600*24) == 0 && seconds/(3600*24) < 256 { + return fmt.Sprintf("%dd", seconds/(3600*24)) + } + if seconds%(3600) == 0 && seconds/(3600) < 256 { + return fmt.Sprintf("%dh", seconds/(3600)) + } + if seconds/60 < 256 { + return fmt.Sprintf("%dm", seconds/60) + } + if seconds/(3600) < 256 { + return fmt.Sprintf("%dh", seconds/(3600)) + } + if seconds/(3600*24) < 256 { + return fmt.Sprintf("%dd", seconds/(3600*24)) + } + if seconds/(3600*24*7) < 256 { + return fmt.Sprintf("%dw", seconds/(3600*24*7)) + } + if seconds/(3600*24*30) < 256 { + return fmt.Sprintf("%dM", seconds/(3600*24*30)) + } + if seconds/(3600*24*365) < 256 { + return fmt.Sprintf("%dy", seconds/(3600*24*365)) + } + return "" +} diff --git a/weed/storage/needle/volume_ttl_test.go b/weed/storage/needle/volume_ttl_test.go index 0afebebf5..150d06e6e 100644 --- a/weed/storage/needle/volume_ttl_test.go +++ b/weed/storage/needle/volume_ttl_test.go @@ -30,13 +30,18 @@ func TestTTLReadWrite(t *testing.T) { t.Errorf("5d ttl:%v", ttl) } + ttl, _ = ReadTTL("50d") + if ttl.Minutes() != 50*24*60 { + t.Errorf("50d ttl:%v", ttl) + } + ttl, _ = ReadTTL("5w") if ttl.Minutes() != 5*7*24*60 { t.Errorf("5w ttl:%v", ttl) } ttl, _ = ReadTTL("5M") - if ttl.Minutes() != 5*31*24*60 { + if ttl.Minutes() != 5*30*24*60 { t.Errorf("5M ttl:%v", ttl) } diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go index 77d081ea7..d35391f66 100644 --- a/weed/storage/needle_map.go +++ b/weed/storage/needle_map.go @@ -1,25 +1,26 @@ package storage import ( - "fmt" + "io" "os" "sync" + "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" . 
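The TTL wire format packs a count byte and a unit byte, and the new SecondsToTTL inverts that back into the shortest string spelling. A quick round trip under the ReadTTL/Minutes definitions shown above:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func main() {
	ttl, _ := needle.ReadTTL("3d")
	fmt.Println(ttl.Minutes())   // 4320 = 3*24*60
	fmt.Printf("%#x\n", ttl.ToUint32()) // count 3 in the high byte, unit code in the low byte
	fmt.Println(needle.SecondsToTTL(3 * 24 * 3600)) // prints "3d"
}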
"github.com/chrislusf/seaweedfs/weed/storage/types" ) -type NeedleMapType int +type NeedleMapKind int const ( - NeedleMapInMemory NeedleMapType = iota + NeedleMapInMemory NeedleMapKind = iota NeedleMapLevelDb // small memory footprint, 4MB total, 1 write buffer, 3 block buffer NeedleMapLevelDbMedium // medium memory footprint, 8MB total, 3 write buffer, 5 block buffer NeedleMapLevelDbLarge // large memory footprint, 12MB total, 4write buffer, 8 block buffer ) type NeedleMapper interface { - Put(key NeedleId, offset Offset, size uint32) error + Put(key NeedleId, offset Offset, size Size) error Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) Delete(key NeedleId, offset Offset) error Close() @@ -30,6 +31,8 @@ type NeedleMapper interface { DeletedCount() int MaxFileKey() NeedleId IndexFileSize() uint64 + Sync() error + ReadIndexEntry(n int64) (key NeedleId, offset Offset, size Size, err error) } type baseNeedleMapper struct { @@ -37,6 +40,7 @@ type baseNeedleMapper struct { indexFile *os.File indexFileAccessLock sync.Mutex + indexFileOffset int64 } func (nm *baseNeedleMapper) IndexFileSize() uint64 { @@ -47,15 +51,35 @@ func (nm *baseNeedleMapper) IndexFileSize() uint64 { return 0 } -func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size uint32) error { +func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size Size) error { bytes := needle_map.ToBytes(key, offset, size) nm.indexFileAccessLock.Lock() defer nm.indexFileAccessLock.Unlock() - if _, err := nm.indexFile.Seek(0, 2); err != nil { - return fmt.Errorf("cannot seek end of indexfile %s: %v", - nm.indexFile.Name(), err) + written, err := nm.indexFile.WriteAt(bytes, nm.indexFileOffset) + if err == nil { + nm.indexFileOffset += int64(written) } - _, err := nm.indexFile.Write(bytes) return err } + +func (nm *baseNeedleMapper) Sync() error { + return nm.indexFile.Sync() +} + +func (nm *baseNeedleMapper) ReadIndexEntry(n int64) (key NeedleId, offset Offset, size Size, err error) { + bytes := make([]byte, NeedleMapEntrySize) + var readCount int + if readCount, err = nm.indexFile.ReadAt(bytes, n*NeedleMapEntrySize); err != nil { + if err == io.EOF { + if readCount == NeedleMapEntrySize { + err = nil + } + } + if err != nil { + return + } + } + key, offset, size = idx.IdxFileEntry(bytes) + return +} diff --git a/weed/storage/needle_map/compact_map.go b/weed/storage/needle_map/compact_map.go index 76783d0b0..2b1a471bc 100644 --- a/weed/storage/needle_map/compact_map.go +++ b/weed/storage/needle_map/compact_map.go @@ -18,7 +18,7 @@ const SectionalNeedleIdLimit = 1<<32 - 1 type SectionalNeedleValue struct { Key SectionalNeedleId OffsetLower OffsetLower `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G - Size uint32 `comment:"Size of the data portion"` + Size Size `comment:"Size of the data portion"` } type SectionalNeedleValueExtra struct { @@ -50,7 +50,7 @@ func NewCompactSection(start NeedleId) *CompactSection { } //return old entry size -func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) { +func (cs *CompactSection) Set(key NeedleId, offset Offset, size Size) (oldOffset Offset, oldSize Size) { cs.Lock() if key > cs.end { cs.end = key @@ -80,7 +80,7 @@ func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffs return } -func (cs *CompactSection) setOverflowEntry(skey SectionalNeedleId, offset Offset, size uint32) { +func (cs *CompactSection) setOverflowEntry(skey SectionalNeedleId, offset 
Offset, size Size) { needleValue := SectionalNeedleValue{Key: skey, OffsetLower: offset.OffsetLower, Size: size} needleValueExtra := SectionalNeedleValueExtra{OffsetHigher: offset.OffsetHigher} insertCandidate := sort.Search(len(cs.overflow), func(i int) bool { @@ -115,24 +115,21 @@ func (cs *CompactSection) deleteOverflowEntry(key SectionalNeedleId) { return cs.overflow[i].Key >= key }) if deleteCandidate != length && cs.overflow[deleteCandidate].Key == key { - for i := deleteCandidate; i < length-1; i++ { - cs.overflow[i] = cs.overflow[i+1] - cs.overflowExtra[i] = cs.overflowExtra[i+1] + if cs.overflow[deleteCandidate].Size.IsValid() { + cs.overflow[deleteCandidate].Size = -cs.overflow[deleteCandidate].Size } - cs.overflow = cs.overflow[0 : length-1] - cs.overflowExtra = cs.overflowExtra[0 : length-1] } } //return old entry size -func (cs *CompactSection) Delete(key NeedleId) uint32 { +func (cs *CompactSection) Delete(key NeedleId) Size { skey := SectionalNeedleId(key - cs.start) cs.Lock() - ret := uint32(0) + ret := Size(0) if i := cs.binarySearchValues(skey); i >= 0 { - if cs.values[i].Size > 0 && cs.values[i].Size != TombstoneFileSize { + if cs.values[i].Size > 0 && cs.values[i].Size.IsValid() { ret = cs.values[i].Size - cs.values[i].Size = TombstoneFileSize + cs.values[i].Size = -cs.values[i].Size } } if _, v, found := cs.findOverflowEntry(skey); found { @@ -181,7 +178,7 @@ func NewCompactMap() *CompactMap { return &CompactMap{} } -func (cm *CompactMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) { +func (cm *CompactMap) Set(key NeedleId, offset Offset, size Size) (oldOffset Offset, oldSize Size) { x := cm.binarySearchCompactSection(key) if x < 0 || (key-cm.list[x].start) > SectionalNeedleIdLimit { // println(x, "adding to existing", len(cm.list), "sections, starting", key) @@ -204,10 +201,10 @@ func (cm *CompactMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset O // println(key, "set to section[", x, "].start", cm.list[x].start) return cm.list[x].Set(key, offset, size) } -func (cm *CompactMap) Delete(key NeedleId) uint32 { +func (cm *CompactMap) Delete(key NeedleId) Size { x := cm.binarySearchCompactSection(key) if x < 0 { - return uint32(0) + return Size(0) } return cm.list[x].Delete(key) } diff --git a/weed/storage/needle_map/compact_map_perf_test.go b/weed/storage/needle_map/compact_map_perf_test.go index 3a3648641..081fb34e9 100644 --- a/weed/storage/needle_map/compact_map_perf_test.go +++ b/weed/storage/needle_map/compact_map_perf_test.go @@ -9,7 +9,6 @@ import ( "time" . 
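Delete and deleteOverflowEntry above now negate the stored Size instead of compacting the slice, so the sign bit doubles as a tombstone and later lookups can still tell "deleted" from "never existed". A hedged sketch of what a caller observes, inferred from the tests further down:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
)

func main() {
	m := needle_map.NewCompactMap()
	m.Set(types.NeedleId(42), types.ToOffset(8), types.Size(100))

	oldSize := m.Delete(types.NeedleId(42)) // returns the live size that was tombstoned
	v, ok := m.Get(types.NeedleId(42))      // the entry is still physically present
	fmt.Println(oldSize, ok && v.Size.IsDeleted()) // 100 true
}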
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" ) /* @@ -32,7 +31,7 @@ func TestMemoryUsage(t *testing.T) { startTime := time.Now() for i := 0; i < 10; i++ { - indexFile, ie := os.OpenFile("../../../test/sample.idx", os.O_RDWR|os.O_RDONLY, 0644) + indexFile, ie := os.OpenFile("../../../test/data/sample.idx", os.O_RDWR|os.O_RDONLY, 0644) if ie != nil { log.Fatalln(ie) } @@ -60,7 +59,7 @@ func loadNewNeedleMap(file *os.File) (*CompactMap, uint64) { rowCount++ key := BytesToNeedleId(bytes[i : i+NeedleIdSize]) offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize]) - size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize]) + size := BytesToSize(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize]) if !offset.IsZero() { m.Set(NeedleId(key), offset, size) diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go index 7eea3969a..199cb26b3 100644 --- a/weed/storage/needle_map/compact_map_test.go +++ b/weed/storage/needle_map/compact_map_test.go @@ -49,7 +49,7 @@ func TestIssue52(t *testing.T) { func TestCompactMap(t *testing.T) { m := NewCompactMap() for i := uint32(0); i < 100*batch; i += 2 { - m.Set(NeedleId(i), ToOffset(int64(i)), i) + m.Set(NeedleId(i), ToOffset(int64(i)), Size(i)) } for i := uint32(0); i < 100*batch; i += 37 { @@ -57,7 +57,7 @@ func TestCompactMap(t *testing.T) { } for i := uint32(0); i < 10*batch; i += 3 { - m.Set(NeedleId(i), ToOffset(int64(i+11)), i+5) + m.Set(NeedleId(i), ToOffset(int64(i+11)), Size(i+5)) } // for i := uint32(0); i < 100; i++ { @@ -72,15 +72,15 @@ func TestCompactMap(t *testing.T) { if !ok { t.Fatal("key", i, "missing!") } - if v.Size != i+5 { + if v.Size != Size(i+5) { t.Fatal("key", i, "size", v.Size) } } else if i%37 == 0 { - if ok && v.Size != TombstoneFileSize { + if ok && v.Size.IsValid() { t.Fatal("key", i, "should have been deleted needle value", v) } } else if i%2 == 0 { - if v.Size != i { + if v.Size != Size(i) { t.Fatal("key", i, "size", v.Size) } } @@ -89,14 +89,14 @@ func TestCompactMap(t *testing.T) { for i := uint32(10 * batch); i < 100*batch; i++ { v, ok := m.Get(NeedleId(i)) if i%37 == 0 { - if ok && v.Size != TombstoneFileSize { + if ok && v.Size.IsValid() { t.Fatal("key", i, "should have been deleted needle value", v) } } else if i%2 == 0 { if v == nil { t.Fatal("key", i, "missing") } - if v.Size != i { + if v.Size != Size(i) { t.Fatal("key", i, "size", v.Size) } } @@ -129,8 +129,8 @@ func TestOverflow(t *testing.T) { cs.deleteOverflowEntry(4) - if len(cs.overflow) != 4 { - t.Fatalf("expecting 4 entries now: %+v", cs.overflow) + if len(cs.overflow) != 5 { + t.Fatalf("expecting 5 entries now: %+v", cs.overflow) } _, x, _ := cs.findOverflowEntry(5) @@ -146,7 +146,7 @@ func TestOverflow(t *testing.T) { cs.deleteOverflowEntry(1) for i, x := range cs.overflow { - println("overflow[", i, "]:", x.Key) + println("overflow[", i, "]:", x.Key, "size", x.Size) } println() diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go index 6aba6adeb..ba1fd3d1e 100644 --- a/weed/storage/needle_map/memdb.go +++ b/weed/storage/needle_map/memdb.go @@ -2,6 +2,7 @@ package needle_map import ( "fmt" + "io" "os" "github.com/syndtr/goleveldb/leveldb" @@ -11,7 +12,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/idx" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" ) //This map uses in memory level db @@ -32,7 +32,7 @@ func NewMemDb() *MemDb { return t } -func (cm *MemDb) Set(key NeedleId, offset Offset, size uint32) error { +func (cm *MemDb) Set(key NeedleId, offset Offset, size Size) error { bytes := ToBytes(key, offset, size) @@ -56,7 +56,7 @@ func (cm *MemDb) Get(key NeedleId) (*NeedleValue, bool) { return nil, false } offset := BytesToOffset(data[0:OffsetSize]) - size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + size := BytesToSize(data[OffsetSize : OffsetSize+SizeSize]) return &NeedleValue{Key: key, Offset: offset, Size: size}, true } @@ -67,7 +67,7 @@ func (cm *MemDb) AscendingVisit(visit func(NeedleValue) error) (ret error) { key := BytesToNeedleId(iter.Key()) data := iter.Value() offset := BytesToOffset(data[0:OffsetSize]) - size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + size := BytesToSize(data[OffsetSize : OffsetSize+SizeSize]) needle := NeedleValue{Key: key, Offset: offset, Size: size} ret = visit(needle) @@ -89,6 +89,9 @@ func (cm *MemDb) SaveToIdx(idxName string) (ret error) { defer idxFile.Close() return cm.AscendingVisit(func(value NeedleValue) error { + if value.Offset.IsZero() || value.Size.IsDeleted() { + return nil + } _, err := idxFile.Write(value.ToBytes()) return err }) @@ -102,11 +105,21 @@ func (cm *MemDb) LoadFromIdx(idxName string) (ret error) { } defer idxFile.Close() - return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size uint32) error { - if offset.IsZero() || size == TombstoneFileSize { - return nil + return cm.LoadFromReaderAt(idxFile) + +} + +func (cm *MemDb) LoadFromReaderAt(readerAt io.ReaderAt) (ret error) { + + return idx.WalkIndexFile(readerAt, func(key NeedleId, offset Offset, size Size) error { + if offset.IsZero() || size.IsDeleted() { + return cm.Delete(key) } return cm.Set(key, offset, size) }) } + +func (cm *MemDb) Close() { + cm.db.Close() +} diff --git a/weed/storage/needle_map/memdb_test.go b/weed/storage/needle_map/memdb_test.go new file mode 100644 index 000000000..7b45d23f8 --- /dev/null +++ b/weed/storage/needle_map/memdb_test.go @@ -0,0 +1,23 @@ +package needle_map + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func BenchmarkMemDb(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + nm := NewMemDb() + + nid := types.NeedleId(345) + offset := types.Offset{ + OffsetHigher: types.OffsetHigher{}, + OffsetLower: types.OffsetLower{}, + } + nm.Set(nid, offset, 324) + nm.Close() + } + +} diff --git a/weed/storage/needle_map/needle_value.go b/weed/storage/needle_map/needle_value.go index ef540b55e..f8d614660 100644 --- a/weed/storage/needle_map/needle_value.go +++ b/weed/storage/needle_map/needle_value.go @@ -9,7 +9,7 @@ import ( type NeedleValue struct { Key NeedleId Offset Offset `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G - Size uint32 `comment:"Size of the data portion"` + Size Size `comment:"Size of the data portion"` } func (this NeedleValue) Less(than btree.Item) bool { @@ -21,10 +21,10 @@ func (nv NeedleValue) ToBytes() []byte { return ToBytes(nv.Key, nv.Offset, nv.Size) } -func ToBytes(key NeedleId, offset Offset, size uint32) []byte { +func ToBytes(key NeedleId, offset Offset, size Size) []byte { bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize) NeedleIdToBytes(bytes[0:NeedleIdSize], key) OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset) - 
util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size) + util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], uint32(size)) return bytes } diff --git a/weed/storage/needle_map/needle_value_map.go b/weed/storage/needle_map/needle_value_map.go index 0a5a00ef7..a30cb96c4 100644 --- a/weed/storage/needle_map/needle_value_map.go +++ b/weed/storage/needle_map/needle_value_map.go @@ -5,8 +5,8 @@ import ( ) type NeedleValueMap interface { - Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) - Delete(key NeedleId) uint32 + Set(key NeedleId, offset Offset, size Size) (oldOffset Offset, oldSize Size) + Delete(key NeedleId) Size Get(key NeedleId) (*NeedleValue, bool) AscendingVisit(visit func(NeedleValue) error) error } diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index ef8571e83..31c86d124 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -5,14 +5,16 @@ import ( "os" "path/filepath" - "github.com/chrislusf/seaweedfs/weed/storage/idx" + "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" . "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/syndtr/goleveldb/leveldb" ) type LevelDbNeedleMap struct { @@ -25,14 +27,24 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option m = &LevelDbNeedleMap{dbFileName: dbFileName} m.indexFile = indexFile if !isLevelDbFresh(dbFileName, indexFile) { - glog.V(0).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) + glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) generateLevelDbFile(dbFileName, indexFile) - glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) + glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) + } + if stat, err := indexFile.Stat(); err != nil { + glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + } else { + m.indexFileOffset = stat.Size() } glog.V(1).Infof("Opening %s...", dbFileName) if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil { - return + if errors.IsCorrupted(err) { + m.db, err = leveldb.RecoverFile(dbFileName, opts) + } + if err != nil { + return + } } glog.V(1).Infof("Loading %s...", indexFile.Name()) mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) @@ -66,8 +78,8 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error { return err } defer db.Close() - return idx.WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error { - if !offset.IsZero() && size != TombstoneFileSize { + return idx.WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size Size) error { + if !offset.IsZero() && size.IsValid() { levelDbWrite(db, key, offset, size) } else { levelDbDelete(db, key) @@ -84,12 +96,12 @@ func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, o return nil, false } offset := BytesToOffset(data[0:OffsetSize]) - size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + size := BytesToSize(data[OffsetSize : OffsetSize+SizeSize]) return &needle_map.NeedleValue{Key: key, Offset: offset, Size: size}, true } -func (m *LevelDbNeedleMap) Put(key NeedleId, 
offset Offset, size uint32) error { - var oldSize uint32 +func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { + var oldSize Size if oldNeedle, ok := m.Get(key); ok { oldSize = oldNeedle.Size } @@ -101,7 +113,7 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error { return levelDbWrite(m.db, key, offset, size) } -func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size uint32) error { +func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size) error { bytes := needle_map.ToBytes(key, offset, size) @@ -117,19 +129,34 @@ func levelDbDelete(db *leveldb.DB, key NeedleId) error { } func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error { - if oldNeedle, ok := m.Get(key); ok { - m.logDelete(oldNeedle.Size) + oldNeedle, found := m.Get(key) + if !found || oldNeedle.Size.IsDeleted() { + return nil } + m.logDelete(oldNeedle.Size) + // write to index file first if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { return err } - return levelDbDelete(m.db, key) + + return levelDbWrite(m.db, key, oldNeedle.Offset, -oldNeedle.Size) } func (m *LevelDbNeedleMap) Close() { - m.indexFile.Close() - m.db.Close() + indexFileName := m.indexFile.Name() + if err := m.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed: %v", indexFileName, err) + } + if err := m.indexFile.Close(); err != nil { + glog.Warningf("close index file %s failed: %v", indexFileName, err) + } + + if m.db != nil { + if err := m.db.Close(); err != nil { + glog.Warningf("close levelDB failed: %v", err) + } + } } func (m *LevelDbNeedleMap) Destroy() error { diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index 37dee7889..1b58708c6 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -19,6 +19,11 @@ func NewCompactNeedleMap(file *os.File) *NeedleMap { m: needle_map.NewCompactMap(), } nm.indexFile = file + stat, err := file.Stat() + if err != nil { + glog.Fatalf("stat file %s: %v", file.Name(), err) + } + nm.indexFileOffset = stat.Size() return nm } @@ -28,13 +33,13 @@ func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) { } func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { - e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error { + e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size Size) error { nm.MaybeSetMaxFileKey(key) - if !offset.IsZero() && size != TombstoneFileSize { + if !offset.IsZero() && size.IsValid() { nm.FileCounter++ nm.FileByteCounter = nm.FileByteCounter + uint64(size) oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size) - if !oldOffset.IsZero() && oldSize != TombstoneFileSize { + if !oldOffset.IsZero() && oldSize.IsValid() { nm.DeletionCounter++ nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize) } @@ -49,7 +54,7 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { return nm, e } -func (nm *NeedleMap) Put(key NeedleId, offset Offset, size uint32) error { +func (nm *NeedleMap) Put(key NeedleId, offset Offset, size Size) error { _, oldSize := nm.m.Set(NeedleId(key), offset, size) nm.logPut(key, oldSize, size) return nm.appendToIndexFile(key, offset, size) @@ -64,6 +69,10 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error { return nm.appendToIndexFile(key, offset, TombstoneFileSize) } func (nm *NeedleMap) Close() { + indexFileName := nm.indexFile.Name() + if err := nm.indexFile.Sync(); err != nil { + 
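NewLevelDbNeedleMap now retries a corrupted store with RecoverFile instead of failing the volume load outright. The open-or-recover pattern in isolation, using the same goleveldb calls as the hunk above:

package sketch

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func openOrRecover(path string, opts *opt.Options) (*leveldb.DB, error) {
	db, err := leveldb.OpenFile(path, opts)
	if errors.IsCorrupted(err) {
		db, err = leveldb.RecoverFile(path, opts) // rebuild tables from whatever is readable
	}
	return db, err
}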
glog.Warningf("sync file %s failed, %v", indexFileName, err) + } _ = nm.indexFile.Close() } func (nm *NeedleMap) Destroy() error { diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go index 823a04108..3618dada9 100644 --- a/weed/storage/needle_map_metric.go +++ b/weed/storage/needle_map_metric.go @@ -18,31 +18,31 @@ type mapMetric struct { MaximumFileKey uint64 `json:"MaxFileKey"` } -func (mm *mapMetric) logDelete(deletedByteCount uint32) { +func (mm *mapMetric) logDelete(deletedByteCount Size) { if mm == nil { return } mm.LogDeletionCounter(deletedByteCount) } -func (mm *mapMetric) logPut(key NeedleId, oldSize uint32, newSize uint32) { +func (mm *mapMetric) logPut(key NeedleId, oldSize Size, newSize Size) { if mm == nil { return } mm.MaybeSetMaxFileKey(key) mm.LogFileCounter(newSize) - if oldSize > 0 && oldSize != TombstoneFileSize { + if oldSize > 0 && oldSize.IsValid() { mm.LogDeletionCounter(oldSize) } } -func (mm *mapMetric) LogFileCounter(newSize uint32) { +func (mm *mapMetric) LogFileCounter(newSize Size) { if mm == nil { return } atomic.AddUint32(&mm.FileCounter, 1) atomic.AddUint64(&mm.FileByteCounter, uint64(newSize)) } -func (mm *mapMetric) LogDeletionCounter(oldSize uint32) { +func (mm *mapMetric) LogDeletionCounter(oldSize Size) { if mm == nil { return } @@ -97,11 +97,11 @@ func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) { buf := make([]byte, NeedleIdSize) err = reverseWalkIndexFile(r, func(entryCount int64) { bf = bloom.NewWithEstimates(uint(entryCount), 0.001) - }, func(key NeedleId, offset Offset, size uint32) error { + }, func(key NeedleId, offset Offset, size Size) error { mm.MaybeSetMaxFileKey(key) NeedleIdToBytes(buf, key) - if size != TombstoneFileSize { + if size.IsValid() { mm.FileByteCounter += uint64(size) } @@ -111,7 +111,7 @@ func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) { } else { // deleted file mm.DeletionCounter++ - if size != TombstoneFileSize { + if size.IsValid() { // previously already deleted file mm.DeletionByteCounter += uint64(size) } @@ -121,7 +121,7 @@ func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) { return } -func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key NeedleId, offset Offset, size uint32) error) error { +func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key NeedleId, offset Offset, size Size) error) error { fi, err := r.Stat() if err != nil { return fmt.Errorf("file %s stat error: %v", r.Name(), err) diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go index ae2177a30..362659a11 100644 --- a/weed/storage/needle_map_metric_test.go +++ b/weed/storage/needle_map_metric_test.go @@ -15,7 +15,7 @@ func TestFastLoadingNeedleMapMetrics(t *testing.T) { nm := NewCompactNeedleMap(idxFile) for i := 0; i < 10000; i++ { - nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), uint32(1)) + nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), Size(1)) if rand.Float32() < 0.2 { nm.Delete(Uint64ToNeedleId(uint64(rand.Int63n(int64(i))+1)), Uint32ToOffset(uint32(0))) } diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go index e6f9258f3..662b90531 100644 --- a/weed/storage/needle_map_sorted_file.go +++ b/weed/storage/needle_map_sorted_file.go @@ -16,18 +16,18 @@ type SortedFileNeedleMap struct { dbFileSize int64 } -func NewSortedFileNeedleMap(baseFileName string, indexFile *os.File) (m 
*SortedFileNeedleMap, err error) { - m = &SortedFileNeedleMap{baseFileName: baseFileName} +func NewSortedFileNeedleMap(indexBaseFileName string, indexFile *os.File) (m *SortedFileNeedleMap, err error) { + m = &SortedFileNeedleMap{baseFileName: indexBaseFileName} m.indexFile = indexFile - fileName := baseFileName + ".sdx" + fileName := indexBaseFileName + ".sdx" if !isSortedFileFresh(fileName, indexFile) { glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) - erasure_coding.WriteSortedFileFromIdx(baseFileName, ".sdx") + erasure_coding.WriteSortedFileFromIdx(indexBaseFileName, ".sdx") glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) } glog.V(1).Infof("Opening %s...", fileName) - if m.dbFile, err = os.Open(baseFileName + ".sdx"); err != nil { + if m.dbFile, err = os.Open(indexBaseFileName + ".sdx"); err != nil { return } dbStat, _ := m.dbFile.Stat() @@ -65,7 +65,7 @@ func (m *SortedFileNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue } -func (m *SortedFileNeedleMap) Put(key NeedleId, offset Offset, size uint32) error { +func (m *SortedFileNeedleMap) Put(key NeedleId, offset Offset, size Size) error { return os.ErrInvalid } @@ -80,7 +80,7 @@ func (m *SortedFileNeedleMap) Delete(key NeedleId, offset Offset) error { return err } - if size == TombstoneFileSize { + if size.IsDeleted() { return nil } @@ -94,8 +94,12 @@ func (m *SortedFileNeedleMap) Delete(key NeedleId, offset Offset) error { } func (m *SortedFileNeedleMap) Close() { - m.indexFile.Close() - m.dbFile.Close() + if m.indexFile != nil { + m.indexFile.Close() + } + if m.dbFile != nil { + m.dbFile.Close() + } } func (m *SortedFileNeedleMap) Destroy() error { diff --git a/weed/storage/store.go b/weed/storage/store.go index 512f72ceb..6be15a4c9 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -2,13 +2,17 @@ package storage import ( "fmt" + "path/filepath" + "strings" "sync/atomic" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" @@ -18,21 +22,25 @@ const ( MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes ) +type ReadOption struct { + ReadDeleted bool +} + /* * A VolumeServer contains one Store */ type Store struct { MasterAddress string grpcDialOption grpc.DialOption - volumeSizeLimit uint64 //read from the master + volumeSizeLimit uint64 // read from the master Ip string Port int PublicUrl string Locations []*DiskLocation - dataCenter string //optional informaton, overwriting master setting if exists - rack string //optional information, overwriting master setting if exists + dataCenter string // optional informaton, overwriting master setting if exists + rack string // optional information, overwriting master setting if exists connected bool - NeedleMapType NeedleMapType + NeedleMapKind NeedleMapKind NewVolumesChan chan master_pb.VolumeShortInformationMessage DeletedVolumesChan chan master_pb.VolumeShortInformationMessage NewEcShardsChan chan master_pb.VolumeEcShardInformationMessage @@ -44,11 +52,11 @@ func (s *Store) String() (str string) { return } -func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) { - s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind} +func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType) (s *Store) { + s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapKind: needleMapKind} s.Locations = make([]*DiskLocation, 0) for i := 0; i < len(dirnames); i++ { - location := NewDiskLocation(dirnames[i], maxVolumeCounts[i]) + location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpacePercents[i], idxFolder, diskTypes[i]) location.loadExistingVolumes(needleMapKind) s.Locations = append(s.Locations, location) stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i])) @@ -61,7 +69,7 @@ func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, di return } -func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32) error { +func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32, diskType DiskType) error { rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement) if e != nil { return e @@ -70,7 +78,7 @@ func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMap if e != nil { return e } - e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb) + e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb, diskType) return e } func (s *Store) DeleteCollection(collection string) (e error) { @@ -92,10 +100,19 @@ func (s *Store) findVolume(vid needle.VolumeId) *Volume { } return nil } -func (s *Store) FindFreeLocation() (ret *DiskLocation) { +func (s *Store) FindFreeLocation(diskType DiskType) (ret *DiskLocation) { max := 0 for _, location := range s.Locations { + if diskType != location.DiskType { + continue + } + if location.isDiskSpaceLow { + continue + } currentFreeCount := 
location.MaxVolumeCount - location.VolumesLen() + currentFreeCount *= erasure_coding.DataShardsCount + currentFreeCount -= location.EcVolumesLen() + currentFreeCount /= erasure_coding.DataShardsCount if currentFreeCount > max { max = currentFreeCount ret = location @@ -103,14 +120,14 @@ func (s *Store) FindFreeLocation() (ret *DiskLocation) { } return ret } -func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) error { +func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, diskType DiskType) error { if s.findVolume(vid) != nil { return fmt.Errorf("Volume Id %d already exists!", vid) } - if location := s.FindFreeLocation(); location != nil { + if location := s.FindFreeLocation(diskType); location != nil { glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) - if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil { + if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil { location.SetVolume(vid, volume) glog.V(0).Infof("add volume %d", vid) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ @@ -119,6 +136,7 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind ReplicaPlacement: uint32(replicaPlacement.Byte()), Version: uint32(volume.Version()), Ttl: ttl.ToUint32(), + DiskType: string(diskType), } return nil } else { @@ -128,64 +146,130 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind return fmt.Errorf("No more free space left") } -func (s *Store) VolumeInfos() []*VolumeInfo { - var stats []*VolumeInfo +func (s *Store) VolumeInfos() (allStats []*VolumeInfo) { for _, location := range s.Locations { - location.volumesLock.RLock() - for k, v := range location.volumes { - s := &VolumeInfo{ - Id: needle.VolumeId(k), - Size: v.ContentSize(), - Collection: v.Collection, - ReplicaPlacement: v.ReplicaPlacement, - Version: v.Version(), - FileCount: int(v.FileCount()), - DeleteCount: int(v.DeletedCount()), - DeletedByteCount: v.DeletedSize(), - ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, - Ttl: v.Ttl, - CompactRevision: uint32(v.CompactionRevision), - } - s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey() - stats = append(stats, s) - } - location.volumesLock.RUnlock() + stats := collectStatsForOneLocation(location) + allStats = append(allStats, stats...) 
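FindFreeLocation's new arithmetic deserves a note: erasure-coded shards consume slot capacity at 1/DataShardsCount of a volume each, so free capacity is converted to shard units, the EC shards are subtracted, and the result is divided back down. Integer division means a partially consumed slot no longer counts as free. Extracted as a pure function:

    // freeVolumeSlots mirrors the computation above. With 10 data shards,
    // a location holding 8 of 10 volumes plus 15 ec shards has
    // (2*10 - 15) / 10 = 0 whole free slots.
    func freeVolumeSlots(maxVolumes, volumes, ecShards, dataShardsCount int) int {
        free := (maxVolumes - volumes) * dataShardsCount
        free -= ecShards
        return free / dataShardsCount
    }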
+ } + sortVolumeInfos(allStats) + return allStats +} + +func collectStatsForOneLocation(location *DiskLocation) (stats []*VolumeInfo) { + location.volumesLock.RLock() + defer location.volumesLock.RUnlock() + + for k, v := range location.volumes { + s := collectStatForOneVolume(k, v) + stats = append(stats, s) } - sortVolumeInfos(stats) return stats } +func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) { + + s = &VolumeInfo{ + Id: vid, + Collection: v.Collection, + ReplicaPlacement: v.ReplicaPlacement, + Version: v.Version(), + ReadOnly: v.IsReadOnly(), + Ttl: v.Ttl, + CompactRevision: uint32(v.CompactionRevision), + DiskType: v.DiskType().String(), + } + s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey() + + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + + if v.nm == nil { + return + } + + s.FileCount = v.nm.FileCount() + s.DeleteCount = v.nm.DeletedCount() + s.DeletedByteCount = v.nm.DeletedSize() + s.Size = v.nm.ContentSize() + + return +} + func (s *Store) SetDataCenter(dataCenter string) { s.dataCenter = dataCenter } func (s *Store) SetRack(rack string) { s.rack = rack } +func (s *Store) GetDataCenter() string { + return s.dataCenter +} +func (s *Store) GetRack() string { + return s.rack +} func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { var volumeMessages []*master_pb.VolumeInformationMessage - maxVolumeCount := 0 + maxVolumeCounts := make(map[string]uint32) var maxFileKey NeedleId collectionVolumeSize := make(map[string]uint64) + collectionVolumeReadOnlyCount := make(map[string]map[string]uint8) for _, location := range s.Locations { var deleteVids []needle.VolumeId - maxVolumeCount = maxVolumeCount + location.MaxVolumeCount + maxVolumeCounts[string(location.DiskType)] += uint32(location.MaxVolumeCount) location.volumesLock.RLock() for _, v := range location.volumes { - if maxFileKey < v.MaxFileKey() { - maxFileKey = v.MaxFileKey() + curMaxFileKey, volumeMessage := v.ToVolumeInformationMessage() + if volumeMessage == nil { + continue + } + if maxFileKey < curMaxFileKey { + maxFileKey = curMaxFileKey } - if !v.expired(s.GetVolumeSizeLimit()) { - volumeMessages = append(volumeMessages, v.ToVolumeInformationMessage()) + deleteVolume := false + if !v.expired(volumeMessage.Size, s.GetVolumeSizeLimit()) { + volumeMessages = append(volumeMessages, volumeMessage) } else { if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) { deleteVids = append(deleteVids, v.Id) + deleteVolume = true } else { - glog.V(0).Infoln("volume", v.Id, "is expired.") + glog.V(0).Infof("volume %d is expired", v.Id) + } + if v.lastIoError != nil { + deleteVids = append(deleteVids, v.Id) + deleteVolume = true + glog.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError) + } + } + + if _, exist := collectionVolumeSize[v.Collection]; !exist { + collectionVolumeSize[v.Collection] = 0 + } + if !deleteVolume { + collectionVolumeSize[v.Collection] += volumeMessage.Size + } + + if _, exist := collectionVolumeReadOnlyCount[v.Collection]; !exist { + collectionVolumeReadOnlyCount[v.Collection] = map[string]uint8{ + "IsReadOnly": 0, + "noWriteOrDelete": 0, + "noWriteCanDelete": 0, + "isDiskSpaceLow": 0, + } + } + if !deleteVolume && v.IsReadOnly() { + collectionVolumeReadOnlyCount[v.Collection]["IsReadOnly"] += 1 + if v.noWriteOrDelete { + collectionVolumeReadOnlyCount[v.Collection]["noWriteOrDelete"] += 1 + } + if v.noWriteCanDelete { + collectionVolumeReadOnlyCount[v.Collection]["noWriteCanDelete"] += 1 + } + if v.location.isDiskSpaceLow { + 
collectionVolumeReadOnlyCount[v.Collection]["isDiskSpaceLow"] += 1 } } - fileSize, _, _ := v.FileStat() - collectionVolumeSize[v.Collection] += fileSize } location.volumesLock.RUnlock() @@ -193,8 +277,14 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { // delete expired volumes. location.volumesLock.Lock() for _, vid := range deleteVids { - location.deleteVolumeById(vid) - glog.V(0).Infoln("volume", vid, "is deleted.") + found, err := location.deleteVolumeById(vid) + if err == nil { + if found { + glog.V(0).Infof("volume %d is deleted", vid) + } + } else { + glog.Warningf("delete volume %d: %v", vid, err) + } } location.volumesLock.Unlock() } @@ -204,16 +294,22 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "normal").Set(float64(size)) } + for col, types := range collectionVolumeReadOnlyCount { + for t, count := range types { + stats.VolumeServerReadOnlyVolumeGauge.WithLabelValues(col, t).Set(float64(count)) + } + } + return &master_pb.Heartbeat{ - Ip: s.Ip, - Port: uint32(s.Port), - PublicUrl: s.PublicUrl, - MaxVolumeCount: uint32(maxVolumeCount), - MaxFileKey: NeedleIdToUint64(maxFileKey), - DataCenter: s.dataCenter, - Rack: s.rack, - Volumes: volumeMessages, - HasNoVolumes: len(volumeMessages) == 0, + Ip: s.Ip, + Port: uint32(s.Port), + PublicUrl: s.PublicUrl, + MaxVolumeCounts: maxVolumeCounts, + MaxFileKey: NeedleIdToUint64(maxFileKey), + DataCenter: s.dataCenter, + Rack: s.rack, + Volumes: volumeMessages, + HasNoVolumes: len(volumeMessages) == 0, } } @@ -224,17 +320,13 @@ func (s *Store) Close() { } } -func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uint32, isUnchanged bool, err error) { +func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync bool) (isUnchanged bool, err error) { if v := s.findVolume(i); v != nil { - if v.noWriteOrDelete || v.noWriteCanDelete { + if v.IsReadOnly() { err = fmt.Errorf("volume %d is read only", i) return } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.Version())) { - _, size, isUnchanged, err = v.writeNeedle(n) - } else { - err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) - } + _, _, isUnchanged, err = v.writeNeedle2(n, fsync) return } glog.V(0).Infoln("volume", i, "not found!") @@ -242,23 +334,19 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uin return } -func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (uint32, error) { +func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (Size, error) { if v := s.findVolume(i); v != nil { if v.noWriteOrDelete { return 0, fmt.Errorf("volume %d is read only", i) } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(0, v.Version())) { - return v.deleteNeedle(n) - } else { - return 0, fmt.Errorf("volume size limit %d exceeded! 
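The heartbeat no longer reports one flat MaxVolumeCount: capacity is keyed by disk type (the empty string means hdd), and per-collection read-only counts are exported as gauges broken down by cause. The capacity aggregation, isolated as an in-package sketch:

    // capacityByDiskType builds the MaxVolumeCounts map the new
    // heartbeat carries, summing each location's slots under its
    // disk type; "" is the hdd default.
    func capacityByDiskType(locations []*DiskLocation) map[string]uint32 {
        counts := make(map[string]uint32)
        for _, loc := range locations {
            counts[string(loc.DiskType)] += uint32(loc.MaxVolumeCount)
        }
        return counts
    }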
current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) - } + return v.deleteNeedle2(n) } return 0, fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port) } -func (s *Store) ReadVolumeNeedle(i needle.VolumeId, n *needle.Needle) (int, error) { +func (s *Store) ReadVolumeNeedle(i needle.VolumeId, n *needle.Needle, readOption *ReadOption) (int, error) { if v := s.findVolume(i); v != nil { - return v.readNeedle(n) + return v.readNeedle(n, readOption) } return 0, fmt.Errorf("volume %d not found", i) } @@ -276,13 +364,26 @@ func (s *Store) MarkVolumeReadonly(i needle.VolumeId) error { if v == nil { return fmt.Errorf("volume %d not found", i) } + v.noWriteLock.Lock() v.noWriteOrDelete = true + v.noWriteLock.Unlock() + return nil +} + +func (s *Store) MarkVolumeWritable(i needle.VolumeId) error { + v := s.findVolume(i) + if v == nil { + return fmt.Errorf("volume %d not found", i) + } + v.noWriteLock.Lock() + v.noWriteOrDelete = false + v.noWriteLock.Unlock() return nil } func (s *Store) MountVolume(i needle.VolumeId) error { for _, location := range s.Locations { - if found := location.LoadVolume(i, s.NeedleMapType); found == true { + if found := location.LoadVolume(i, s.NeedleMapKind); found == true { glog.V(0).Infof("mount volume %d", i) v := s.findVolume(i) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ @@ -291,6 +392,7 @@ func (s *Store) MountVolume(i needle.VolumeId) error { ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), + DiskType: string(v.location.DiskType), } return nil } @@ -310,6 +412,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error { ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), + DiskType: string(v.location.DiskType), } for _, location := range s.Locations { @@ -326,7 +429,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error { func (s *Store) DeleteVolume(i needle.VolumeId) error { v := s.findVolume(i) if v == nil { - return nil + return fmt.Errorf("delete volume %d not found on disk", i) } message := master_pb.VolumeShortInformationMessage{ Id: uint32(v.Id), @@ -334,18 +437,46 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error { ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), + DiskType: string(v.location.DiskType), } for _, location := range s.Locations { - if error := location.deleteVolumeById(i); error == nil { + if err := location.DeleteVolume(i); err == nil { glog.V(0).Infof("DeleteVolume %d", i) s.DeletedVolumesChan <- message return nil + } else { + glog.Errorf("DeleteVolume %d: %v", i, err) } } return fmt.Errorf("volume %d not found on disk", i) } +func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error { + + for _, location := range s.Locations { + fileInfo, found := location.LocateVolume(i) + if !found { + continue + } + // load, modify, save + baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name())) + vifFile := filepath.Join(location.Directory, baseFileName+".vif") + volumeInfo, _, _, err := pb.MaybeLoadVolumeInfo(vifFile) + if err != nil { + return fmt.Errorf("volume %d fail to load vif", i) + } + volumeInfo.Replication = replication + err = pb.SaveVolumeInfo(vifFile, volumeInfo) + if err != nil { + return fmt.Errorf("volume %d fail to save vif", i) + } + return nil + } + + return fmt.Errorf("volume %d not found on disk", i) +} + func (s *Store) SetVolumeSizeLimit(x uint64) { 
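MarkVolumeReadonly gains a proper lock, and a symmetric MarkVolumeWritable appears. The pattern is the usual RWMutex split: the rare flag writers take the exclusive lock, while the hot path (every write request consults IsReadOnly) takes only the read lock. In isolation:

    package flagsketch

    import "sync"

    type volumeFlags struct {
        mu               sync.RWMutex
        noWriteOrDelete  bool // cannot write, cannot delete
        noWriteCanDelete bool // cannot write, deletes still allowed
    }

    func (f *volumeFlags) setReadOnly(ro bool) {
        f.mu.Lock()
        f.noWriteOrDelete = ro
        f.mu.Unlock()
    }

    func (f *volumeFlags) isReadOnly() bool {
        f.mu.RLock()
        defer f.mu.RUnlock()
        return f.noWriteOrDelete || f.noWriteCanDelete
    }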
atomic.StoreUint64(&s.volumeSizeLimit, x) } @@ -353,3 +484,28 @@ func (s *Store) SetVolumeSizeLimit(x uint64) { func (s *Store) GetVolumeSizeLimit() uint64 { return atomic.LoadUint64(&s.volumeSizeLimit) } + +func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) { + volumeSizeLimit := s.GetVolumeSizeLimit() + if volumeSizeLimit == 0 { + return + } + for _, diskLocation := range s.Locations { + if diskLocation.OriginalMaxVolumeCount == 0 { + currentMaxVolumeCount := diskLocation.MaxVolumeCount + diskStatus := stats.NewDiskStatus(diskLocation.Directory) + unusedSpace := diskLocation.UnUsedSpace(volumeSizeLimit) + unclaimedSpaces := int64(diskStatus.Free) - int64(unusedSpace) + volCount := diskLocation.VolumesLen() + maxVolumeCount := volCount + if unclaimedSpaces > int64(volumeSizeLimit) { + maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1 + } + diskLocation.MaxVolumeCount = maxVolumeCount + glog.V(2).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB", + diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024) + hasChanges = hasChanges || currentMaxVolumeCount != diskLocation.MaxVolumeCount + } + } + return +} diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 27406451f..9702fdd50 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "os" "sort" "sync" "time" @@ -57,8 +58,11 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er Id: uint32(vid), Collection: collection, EcIndexBits: uint32(shardBits.AddShardId(shardId)), + DiskType: string(location.DiskType), } return nil + } else if err == os.ErrNotExist { + continue } else { return fmt.Errorf("%s load ec shard %d.%d: %v", location.Directory, vid, shardId, err) } @@ -79,6 +83,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar Id: uint32(vid), Collection: ecShard.Collection, EcIndexBits: uint32(shardBits.AddShardId(shardId)), + DiskType: string(ecShard.DiskType), } for _, location := range s.Locations { @@ -116,7 +121,7 @@ func (s *Store) DestroyEcVolume(vid needle.VolumeId) { } } -func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *needle.Needle) (int, error) { +func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) { for _, location := range s.Locations { if localEcVolume, found := location.FindEcVolume(vid); found { @@ -124,24 +129,24 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n if err != nil { return 0, fmt.Errorf("locate in local ec volume: %v", err) } - if size == types.TombstoneFileSize { - return 0, fmt.Errorf("entry %s is deleted", n.Id) + if size.IsDeleted() { + return 0, ErrorDeleted } - glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals) + glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals) if len(intervals) > 1 { glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) } - bytes, isDeleted, err := s.readEcShardIntervals(ctx, vid, n.Id, localEcVolume, intervals) + bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals) if err != nil { return 0, fmt.Errorf("ReadEcShardIntervals: %v", err) } if isDeleted { - return 0, fmt.Errorf("ec entry %s is deleted", n.Id) + return 0, ErrorDeleted } - err = 
n.ReadBytes(bytes, offset.ToAcutalOffset(), size, localEcVolume.Version) + err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version) if err != nil { return 0, fmt.Errorf("readbytes: %v", err) } @@ -152,14 +157,14 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n return 0, fmt.Errorf("ec shard %d not found", vid) } -func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { +func (s *Store) readEcShardIntervals(vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { - if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil { + if err = s.cachedLookupEcShardLocations(ecVolume); err != nil { return nil, false, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err) } for i, interval := range intervals { - if d, isDeleted, e := s.readOneEcShardInterval(ctx, needleId, ecVolume, interval); e != nil { + if d, isDeleted, e := s.readOneEcShardInterval(needleId, ecVolume, interval); e != nil { return nil, isDeleted, e } else { if isDeleted { @@ -175,12 +180,12 @@ func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, n return } -func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) { +func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) { shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize) data = make([]byte, interval.Size) if shard, found := ecVolume.FindEcVolumeShard(shardId); found { if _, err = shard.ReadAt(data, actualOffset); err != nil { - glog.V(0).Infof("read local ec shard %d.%d: %v", ecVolume.VolumeId, shardId, err) + glog.V(0).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err) return } } else { @@ -190,16 +195,15 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl // try reading directly if hasShardIdLocation { - _, is_deleted, err = s.readRemoteEcShardInterval(ctx, sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset) + _, is_deleted, err = s.readRemoteEcShardInterval(sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset) if err == nil { return } glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err) - forgetShardId(ecVolume, shardId) } // try reading by recovering from other shards - _, is_deleted, err = s.recoverOneRemoteEcShardInterval(ctx, needleId, ecVolume, shardId, data, actualOffset) + _, is_deleted, err = s.recoverOneRemoteEcShardInterval(needleId, ecVolume, shardId, data, actualOffset) if err == nil { return } @@ -215,7 +219,7 @@ func forgetShardId(ecVolume *erasure_coding.EcVolume, shardId erasure_coding.Sha ecVolume.ShardLocationsLock.Unlock() } -func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *erasure_coding.EcVolume) (err error) { +func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) (err error) { shardCount := len(ecVolume.ShardLocations) if 
shardCount < erasure_coding.DataShardsCount && @@ -234,7 +238,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras req := &master_pb.LookupEcVolumeRequest{ VolumeId: uint32(ecVolume.VolumeId), } - resp, err := masterClient.LookupEcVolume(ctx, req) + resp, err := masterClient.LookupEcVolume(context.Background(), req) if err != nil { return fmt.Errorf("lookup ec volume %d: %v", ecVolume.VolumeId, err) } @@ -258,7 +262,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras return } -func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { if len(sourceDataNodes) == 0 { return 0, false, fmt.Errorf("failed to find ec shard %d.%d", vid, shardId) @@ -266,7 +270,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ for _, sourceDataNode := range sourceDataNodes { glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) - n, is_deleted, err = s.doReadRemoteEcShardInterval(ctx, sourceDataNode, needleId, vid, shardId, buf, offset) + n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset) if err == nil { return } @@ -276,12 +280,12 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ return } -func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy data slice - shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{ + shardReadClient, err := client.VolumeEcShardRead(context.Background(), &volume_server_pb.VolumeEcShardReadRequest{ VolumeId: uint32(vid), ShardId: uint32(shardId), Offset: offset, @@ -298,7 +302,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode break } if receiveErr != nil { - return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err) + return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, receiveErr) } if resp.IsDeleted { is_deleted = true @@ -316,7 +320,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode return } -func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { glog.V(3).Infof("recover ec shard %d.%d from other locations", 
ecVolume.VolumeId, shardIdToRecover) enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) @@ -344,7 +348,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId ty go func(shardId erasure_coding.ShardId, locations []string) { defer wg.Done() data := make([]byte, len(buf)) - nRead, isDeleted, readErr := s.readRemoteEcShardInterval(ctx, locations, needleId, ecVolume.VolumeId, shardId, data, offset) + nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset) if readErr != nil { glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) forgetShardId(ecVolume, shardId) diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index e027d2887..4a75fb20b 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -12,9 +12,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/types" ) -func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { +func (s *Store) DeleteEcShardNeedle(ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { - count, err := s.ReadEcShardNeedle(ctx, ecVolume.VolumeId, n) + count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n) if err != nil { return 0, err @@ -24,7 +24,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin return 0, fmt.Errorf("unexpected cookie %x", cookie) } - if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx, ecVolume, n.Id); err != nil { + if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume, n.Id); err != nil { return 0, err } @@ -32,7 +32,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin } -func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { _, _, intervals, err := ecVolume.LocateEcShardNeedle(needleId, ecVolume.Version) @@ -43,13 +43,13 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, shardId, _ := intervals[0].ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize) hasDeletionSuccess := false - err = s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId) + err = s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId) if err == nil { hasDeletionSuccess = true } for shardId = erasure_coding.DataShardsCount; shardId < erasure_coding.TotalShardsCount; shardId++ { - if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId); parityDeletionError == nil { + if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId); parityDeletionError == nil { hasDeletionSuccess = true } } @@ -62,7 +62,7 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, } -func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId 
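recoverOneRemoteEcShardInterval is where the Reed-Solomon math pays off: any DataShardsCount of the DataShards+ParityShards pieces suffice to rebuild a missing one. The kernel of that operation, using the same klauspost/reedsolomon library the file imports:

    package ecsketch

    import "github.com/klauspost/reedsolomon"

    // recoverShards rebuilds missing pieces in place. Absent shards are
    // passed as nil slices; Reconstruct fills them in, provided at least
    // dataShards of the slices are present and equally sized.
    func recoverShards(shards [][]byte, dataShards, parityShards int) error {
        enc, err := reedsolomon.New(dataShards, parityShards)
        if err != nil {
            return err
        }
        return enc.Reconstruct(shards)
    }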
types.NeedleId) error { ecVolume.ShardLocationsLock.RLock() sourceDataNodes, hasShardLocations := ecVolume.ShardLocations[shardId] @@ -74,7 +74,7 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar for _, sourceDataNode := range sourceDataNodes { glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) - err := s.doDeleteNeedleFromRemoteEcShard(ctx, sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) + err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) if err != nil { return err } @@ -85,12 +85,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar } -func (s *Store) doDeleteNeedleFromRemoteEcShard(ctx context.Context, sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromRemoteEcShard(sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy data slice - _, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{ + _, err := client.VolumeEcBlobDelete(context.Background(), &volume_server_pb.VolumeEcBlobDeleteRequest{ VolumeId: uint32(vid), Collection: collection, FileKey: uint64(needleId), diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index 5dacb71bf..32666a417 100644 --- a/weed/storage/store_vacuum.go +++ b/weed/storage/store_vacuum.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -16,7 +17,11 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { } func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error { if v := s.findVolume(vid); v != nil { - return v.Compact2(preallocate) // compactionBytePerSecond + s := stats.NewDiskStatus(v.dir) + if int64(s.Free) < preallocate { + return fmt.Errorf("free space: %d bytes, not enough for %d bytes", s.Free, preallocate) + } + return v.Compact2(preallocate, compactionBytePerSecond) } return fmt.Errorf("volume id %d is not found during compact", vid) } diff --git a/weed/storage/super_block/replica_placement.go b/weed/storage/super_block/replica_placement.go index fcccbba7d..a263e6669 100644 --- a/weed/storage/super_block/replica_placement.go +++ b/weed/storage/super_block/replica_placement.go @@ -6,9 +6,9 @@ import ( ) type ReplicaPlacement struct { - SameRackCount int - DiffRackCount int - DiffDataCenterCount int + SameRackCount int `json:"node,omitempty"` + DiffRackCount int `json:"rack,omitempty"` + DiffDataCenterCount int `json:"dc,omitempty"` } func NewReplicaPlacementFromString(t string) (*ReplicaPlacement, error) { @@ -36,6 +36,9 @@ func NewReplicaPlacementFromByte(b byte) (*ReplicaPlacement, error) { } func (rp *ReplicaPlacement) Byte() byte { + if rp == nil { + return 0 + } ret := rp.DiffDataCenterCount*100 + rp.DiffRackCount*10 + rp.SameRackCount return byte(ret) } diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go index 2ebb392db..137b97d7f 100644 --- a/weed/storage/types/needle_types.go +++ 
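The new JSON tags on ReplicaPlacement make the encoding explicit: the three counts pack into one byte as decimal digits, dc*100 + rack*10 + node, so the placement string "110" means one extra copy in another data center and one on another rack. The added nil guard keeps Byte() safe on volumes that never set a placement. The packing, as a one-liner:

    // placementByte packs the counts the way ReplicaPlacement.Byte does;
    // e.g. placementByte(1, 1, 0) == 110, the "110" placement.
    func placementByte(diffDataCenter, diffRack, sameRack int) byte {
        return byte(diffDataCenter*100 + diffRack*10 + sameRack)
    }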
b/weed/storage/types/needle_types.go @@ -2,9 +2,9 @@ package types import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/util" - "math" "strconv" + + "github.com/chrislusf/seaweedfs/weed/util" ) type Offset struct { @@ -12,6 +12,15 @@ type Offset struct { OffsetLower } +type Size int32 + +func (s Size) IsDeleted() bool { + return s < 0 || s == TombstoneFileSize +} +func (s Size) IsValid() bool { + return s > 0 && s != TombstoneFileSize +} + type OffsetLower struct { b3 byte b2 byte @@ -27,7 +36,7 @@ const ( NeedleMapEntrySize = NeedleIdSize + OffsetSize + SizeSize TimestampSize = 8 // int64 size NeedlePaddingSize = 8 - TombstoneFileSize = math.MaxUint32 + TombstoneFileSize = Size(-1) CookieSize = 4 ) @@ -49,3 +58,11 @@ func ParseCookie(cookieString string) (Cookie, error) { } return Cookie(cookie), nil } + +func BytesToSize(bytes []byte) Size { + return Size(util.BytesToUint32(bytes)) +} + +func SizeToBytes(bytes []byte, size Size) { + util.Uint32toBytes(bytes, uint32(size)) +} diff --git a/weed/storage/types/offset_4bytes.go b/weed/storage/types/offset_4bytes.go index 9acd069d3..5348d5b36 100644 --- a/weed/storage/types/offset_4bytes.go +++ b/weed/storage/types/offset_4bytes.go @@ -11,8 +11,8 @@ type OffsetHigher struct { } const ( - OffsetSize = 4 - MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 // 32GB + OffsetSize = 4 + MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 // 32GB ) func OffsetToBytes(bytes []byte, offset Offset) { @@ -54,7 +54,7 @@ func ToOffset(offset int64) Offset { return Uint32ToOffset(smaller) } -func (offset Offset) ToAcutalOffset() (actualOffset int64) { +func (offset Offset) ToActualOffset() (actualOffset int64) { return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize) } diff --git a/weed/storage/types/offset_5bytes.go b/weed/storage/types/offset_5bytes.go index f57e4f6d4..b6181fc11 100644 --- a/weed/storage/types/offset_5bytes.go +++ b/weed/storage/types/offset_5bytes.go @@ -11,8 +11,8 @@ type OffsetHigher struct { } const ( - OffsetSize = 4 + 1 - MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 * 256 /* 256 is from the extra byte */ // 8TB + OffsetSize = 4 + 1 + MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 * 256 /* 256 is from the extra byte */ // 8TB ) func OffsetToBytes(bytes []byte, offset Offset) { @@ -71,7 +71,7 @@ func ToOffset(offset int64) Offset { } } -func (offset Offset) ToAcutalOffset() (actualOffset int64) { +func (offset Offset) ToActualOffset() (actualOffset int64) { return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24 + int64(offset.b4)<<32) * int64(NeedlePaddingSize) } diff --git a/weed/storage/types/volume_disk_type.go b/weed/storage/types/volume_disk_type.go new file mode 100644 index 000000000..c9b87d802 --- /dev/null +++ b/weed/storage/types/volume_disk_type.go @@ -0,0 +1,40 @@ +package types + +import ( + "strings" +) + +type DiskType string + +const ( + HardDriveType DiskType = "" + SsdType = "ssd" +) + +func ToDiskType(vt string) (diskType DiskType) { + vt = strings.ToLower(vt) + diskType = HardDriveType + switch vt { + case "", "hdd": + diskType = HardDriveType + case "ssd": + diskType = SsdType + default: + diskType = DiskType(vt) + } + return +} + +func (diskType DiskType) String() string { + if diskType == "" { + return "" + } + return string(diskType) +} + +func (diskType DiskType) ReadableString() string { + if diskType == "" { + return "hdd" + } + return string(diskType) +} diff --git 
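The Size change above is the thread running through this whole commit: TombstoneFileSize moves from math.MaxUint32 to Size(-1), so deletion checks become sign tests and a deleted entry can keep its negated byte count. Crucially, the on-disk field stays a 4-byte unsigned slot, and two's complement keeps the formats compatible: Size(-1) encodes to 0xFFFFFFFF, the exact bit pattern the old tombstone constant used, so existing .idx files read back unchanged. A self-contained demonstration:

    package sizesketch

    import "encoding/binary"

    type Size int32

    const TombstoneFileSize = Size(-1)

    func (s Size) IsDeleted() bool { return s < 0 || s == TombstoneFileSize }
    func (s Size) IsValid() bool   { return s > 0 && s != TombstoneFileSize }

    // Round-tripping through the unsigned on-disk encoding preserves
    // negative sizes bit for bit.
    func sizeToBytes(b []byte, s Size) { binary.BigEndian.PutUint32(b, uint32(s)) }
    func bytesToSize(b []byte) Size    { return Size(binary.BigEndian.Uint32(b)) }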
a/weed/storage/volume.go b/weed/storage/volume.go index acede66bf..e0638d8a8 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -21,20 +21,23 @@ import ( type Volume struct { Id needle.VolumeId dir string + dirIdx string Collection string DataBackend backend.BackendStorageFile nm NeedleMapper - needleMapKind NeedleMapType + needleMapKind NeedleMapKind noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete + noWriteLock sync.RWMutex hasRemoteFile bool // if the volume has a remote file MemoryMapMaxSizeMb uint32 super_block.SuperBlock dataFileAccessLock sync.RWMutex - lastModifiedTsSeconds uint64 //unix time in seconds - lastAppendAtNs uint64 //unix time in nanoseconds + asyncRequestsChan chan *needle.AsyncRequest + lastModifiedTsSeconds uint64 // unix time in seconds + lastAppendAtNs uint64 // unix time in nanoseconds lastCompactIndexOffset uint64 lastCompactRevision uint16 @@ -42,18 +45,26 @@ type Volume struct { isCompacting bool volumeInfo *volume_server_pb.VolumeInfo + location *DiskLocation + + lastIoError error } -func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) { +func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) { // if replicaPlacement is nil, the superblock will be loaded from disk - v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb} + v = &Volume{dir: dirname, dirIdx: dirIdx, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb, + asyncRequestsChan: make(chan *needle.AsyncRequest, 128)} v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl} v.needleMapKind = needleMapKind e = v.load(true, true, needleMapKind, preallocate) + v.startWorker() return } + func (v *Volume) String() string { - return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, noWrite:%v canDelete:%v", v.Id, v.dir, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete) + v.noWriteLock.RLock() + defer v.noWriteLock.RUnlock() + return fmt.Sprintf("Id:%v dir:%s dirIdx:%s Collection:%s dataFile:%v nm:%v noWrite:%v canDelete:%v", v.Id, v.dir, v.dirIdx, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete) } func VolumeFileName(dir string, collection string, id int) (fileName string) { @@ -65,10 +76,24 @@ func VolumeFileName(dir string, collection string, id int) (fileName string) { } return } -func (v *Volume) FileName() (fileName string) { + +func (v *Volume) DataFileName() (fileName string) { return VolumeFileName(v.dir, v.Collection, int(v.Id)) } +func (v *Volume) IndexFileName() (fileName string) { + return VolumeFileName(v.dirIdx, v.Collection, int(v.Id)) +} + +func (v *Volume) FileName(ext string) (fileName string) { + switch ext { + case ".idx", ".cpx", ".ldb": + return VolumeFileName(v.dirIdx, v.Collection, int(v.Id)) + ext + } + // .dat, .cpd, .vif + return VolumeFileName(v.dir, v.Collection, int(v.Id)) + ext +} + func (v *Volume) Version() needle.Version { if v.volumeInfo.Version != 0 { v.SuperBlock.Version = 
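Volume grows a dirIdx directory alongside dir, and FileName(ext) routes by extension: index artifacts (.idx, .cpx, .ldb) resolve under the index directory, which can sit on a faster device, while the bulk .dat plus .cpd and .vif stay under the data directory. A simplified sketch (the real code also prefixes the collection name to the base name):

    package volsketch

    import "path/filepath"

    // fileNameFor mirrors Volume.FileName's routing by extension.
    func fileNameFor(dir, dirIdx, base, ext string) string {
        switch ext {
        case ".idx", ".cpx", ".ldb":
            return filepath.Join(dirIdx, base+ext)
        }
        return filepath.Join(dir, base+ext) // .dat, .cpd, .vif
    }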
needle.Version(v.volumeInfo.Version) @@ -146,6 +171,10 @@ func (v *Volume) IndexFileSize() uint64 { return v.nm.IndexFileSize() } +func (v *Volume) DiskType() types.DiskType { + return v.location.DiskType +} + // Close cleanly shuts down this volume func (v *Volume) Close() { v.dataFileAccessLock.Lock() @@ -169,20 +198,20 @@ func (v *Volume) NeedToReplicate() bool { // except when volume is empty // or when the volume does not have a ttl // or when volumeSizeLimit is 0 when server just starts -func (v *Volume) expired(volumeSizeLimit uint64) bool { +func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool { if volumeSizeLimit == 0 { - //skip if we don't know size limit + // skip if we don't know size limit return false } - if v.ContentSize() == 0 { + if contentSize <= super_block.SuperBlockSize { return false } if v.Ttl == nil || v.Ttl.Minutes() == 0 { return false } - glog.V(1).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds) + glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds) livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60 - glog.V(1).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) + glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes) if int64(v.Ttl.Minutes()) < livedMinutes { return true } @@ -205,27 +234,54 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool { return false } -func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { - size, _, modTime := v.FileStat() +func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) { + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + glog.V(3).Infof("collectStatus volume %d", v.Id) - volumInfo := &master_pb.VolumeInformationMessage{ + if v.nm == nil { + return + } + + ok = true + + maxFileKey = v.nm.MaxFileKey() + datFileSize, modTime, _ = v.DataBackend.GetStat() + fileCount = uint64(v.nm.FileCount()) + deletedCount = uint64(v.nm.DeletedCount()) + deletedSize = v.nm.DeletedSize() + fileCount = uint64(v.nm.FileCount()) + + return +} + +func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.VolumeInformationMessage) { + + maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize, ok := v.collectStatus() + + if !ok { + return 0, nil + } + + volumeInfo := &master_pb.VolumeInformationMessage{ Id: uint32(v.Id), - Size: size, + Size: uint64(volumeSize), Collection: v.Collection, - FileCount: v.FileCount(), - DeleteCount: v.DeletedCount(), - DeletedByteCount: v.DeletedSize(), - ReadOnly: v.noWriteOrDelete, + FileCount: fileCount, + DeleteCount: deletedCount, + DeletedByteCount: deletedSize, + ReadOnly: v.IsReadOnly(), ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), CompactRevision: uint32(v.SuperBlock.CompactionRevision), ModifiedAtSecond: modTime.Unix(), + DiskType: string(v.location.DiskType), } - volumInfo.RemoteStorageName, volumInfo.RemoteStorageKey = v.RemoteStorageNameKey() + volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey() - return volumInfo + return maxFileKey, volumeInfo } func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) { @@ -237,3 +293,9 @@ func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) { } return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey() } 
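expired() now receives the already-collected content size instead of re-statting under the lock, and an "empty" volume is one at or below the superblock size rather than exactly zero. The decision, extracted as a pure function:

    // isExpired: a volume expires only if it holds data beyond the
    // superblock, has a TTL at all, and that TTL has elapsed since the
    // last modification.
    func isExpired(contentSize, superBlockSize uint64, ttlMinutes, lastModifiedSec, nowSec int64) bool {
        if contentSize <= superBlockSize || ttlMinutes == 0 {
            return false
        }
        livedMinutes := (nowSec - lastModifiedSec) / 60
        return ttlMinutes < livedMinutes
    }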
+ +func (v *Volume) IsReadOnly() bool { + v.noWriteLock.RLock() + defer v.noWriteLock.RUnlock() + return v.noWriteOrDelete || v.noWriteCanDelete || v.location.isDiskSpaceLow +} diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go index ec29c895e..82ea12a89 100644 --- a/weed/storage/volume_backup.go +++ b/weed/storage/volume_backup.go @@ -64,8 +64,6 @@ update needle map when receiving new .dat bytes. But seems not necessary now.) func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.DialOption) error { - ctx := context.Background() - startFromOffset, _, _ := v.FileStat() appendAtNs, err := v.findLastAppendAtNs() if err != nil { @@ -76,7 +74,7 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{ + stream, err := client.VolumeIncrementalCopy(context.Background(), &volume_server_pb.VolumeIncrementalCopyRequest{ VolumeId: uint32(v.Id), SinceNs: appendAtNs, }) @@ -126,9 +124,9 @@ func (v *Volume) findLastAppendAtNs() (uint64, error) { } func (v *Volume) locateLastAppendEntry() (Offset, error) { - indexFile, e := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644) + indexFile, e := os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644) if e != nil { - return Offset{}, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e) + return Offset{}, fmt.Errorf("cannot read %s: %v", v.FileName(".idx"), e) } defer indexFile.Close() @@ -156,13 +154,13 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) { func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { - n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()) + n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToActualOffset()) if err != nil { - return 0, fmt.Errorf("ReadNeedleHeader: %v", err) + return 0, fmt.Errorf("ReadNeedleHeader %s [%d,%d): %v", v.DataBackend.Name(), offset.ToActualOffset(), offset.ToActualOffset()+NeedleHeaderSize, err) } - _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength) + _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToActualOffset()+NeedleHeaderSize, bodyLength) if err != nil { - return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err) + return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToActualOffset(), bodyLength, err) } return n.AppendAtNs, nil @@ -170,25 +168,13 @@ func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { // on server side func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast bool, err error) { - indexFile, openErr := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644) - if openErr != nil { - err = fmt.Errorf("cannot read %s.idx: %v", v.FileName(), openErr) - return - } - defer indexFile.Close() - fi, statErr := indexFile.Stat() - if statErr != nil { - err = fmt.Errorf("file %s stat error: %v", indexFile.Name(), statErr) - return - } - fileSize := fi.Size() + fileSize := int64(v.IndexFileSize()) if fileSize%NeedleMapEntrySize != 0 { - err = fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize) + err = fmt.Errorf("unexpected file %s.idx size: %d", v.IndexFileName(), fileSize) 
return } - bytes := make([]byte, NeedleMapEntrySize) entryCount := fileSize / NeedleMapEntrySize l := int64(0) h := entryCount @@ -202,7 +188,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast } // read the appendAtNs for entry m - offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, m) + offset, err = v.readOffsetFromIndex(m) if err != nil { return } @@ -226,19 +212,21 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast return Offset{}, true, nil } - offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, l) + offset, err = v.readOffsetFromIndex(l) return offset, false, err } // bytes is of size NeedleMapEntrySize -func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m int64) (Offset, error) { - if _, readErr := indexFile.ReadAt(bytes, m*NeedleMapEntrySize); readErr != nil && readErr != io.EOF { - return Offset{}, readErr +func (v *Volume) readOffsetFromIndex(m int64) (Offset, error) { + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + if v.nm == nil { + return Offset{}, io.EOF } - _, offset, _ := idx.IdxFileEntry(bytes) - return offset, nil + _, offset, _, err := v.nm.ReadIndexEntry(m) + return offset, err } // generate the volume idx @@ -255,7 +243,7 @@ func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool { } func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { - if n.Size > 0 && n.Size != TombstoneFileSize { + if n.Size > 0 && n.Size.IsValid() { return scanner.v.nm.Put(n.Id, ToOffset(offset), n.Size) } return scanner.v.nm.Delete(n.Id, ToOffset(offset)) diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index a65c2a3ff..b76933083 100644 --- a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -2,8 +2,11 @@ package storage import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "io" "os" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -11,29 +14,56 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uint64, e error) { +func CheckAndFixVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uint64, err error) { var indexSize int64 - if indexSize, e = verifyIndexFileIntegrity(indexFile); e != nil { - return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e) + if indexSize, err = verifyIndexFileIntegrity(indexFile); err != nil { + return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), err) } if indexSize == 0 { return 0, nil } + healthyIndexSize := indexSize + for i := 1; i <= 10 && indexSize >= int64(i)*NeedleMapEntrySize; i++ { + // check and fix last 10 entries + lastAppendAtNs, err = doCheckAndFixVolumeData(v, indexFile, indexSize-int64(i)*NeedleMapEntrySize) + if err == io.EOF { + healthyIndexSize = indexSize - int64(i)*NeedleMapEntrySize + continue + } + if err != ErrorSizeMismatch { + break + } + } + if healthyIndexSize < indexSize { + glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d", indexFile.Name(), indexSize, healthyIndexSize) + err = indexFile.Truncate(healthyIndexSize) + if err != nil { + glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d: 
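BinarySearchByAppendAtNs now reads entries through the needle map (readOffsetFromIndex) instead of reopening the .idx file, but the search itself is unchanged: index entries are ordered by append timestamp, so an upper-bound binary search finds the first entry strictly newer than sinceNs, and running off the end means the peer is already caught up. A sketch of the search, with readTs standing in (hypothetically) for the per-entry timestamp lookup:

    func searchSince(entryCount int64, sinceNs uint64, readTs func(m int64) (uint64, error)) (int64, bool, error) {
        l, h := int64(0), entryCount
        for l < h {
            m := (l + h) / 2
            ts, err := readTs(m)
            if err != nil {
                return 0, false, err
            }
            if ts <= sinceNs {
                l = m + 1 // everything up to m is already replicated
            } else {
                h = m // m might be the first entry still needed
            }
        }
        if l == entryCount {
            return 0, true, nil // nothing newer than sinceNs
        }
        return l, false, nil
    }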
%v", indexFile.Name(), indexSize, healthyIndexSize, err) + } + } + return +} + +func doCheckAndFixVolumeData(v *Volume, indexFile *os.File, indexOffset int64) (lastAppendAtNs uint64, err error) { var lastIdxEntry []byte - if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleMapEntrySize); e != nil { - return 0, fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e) + if lastIdxEntry, err = readIndexEntryAtOffset(indexFile, indexOffset); err != nil { + return 0, fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), err) } key, offset, size := idx.IdxFileEntry(lastIdxEntry) if offset.IsZero() { return 0, nil } - if size == TombstoneFileSize { - size = 0 - } - if lastAppendAtNs, e = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToAcutalOffset(), key, size); e != nil { - return lastAppendAtNs, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e) + if size < 0 { + // read the deletion entry + if lastAppendAtNs, err = verifyDeletedNeedleIntegrity(v.DataBackend, v.Version(), key); err != nil { + return lastAppendAtNs, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), err) + } + } else { + if lastAppendAtNs, err = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToActualOffset(), key, size); err != nil { + return lastAppendAtNs, err + } } - return + return lastAppendAtNs, nil } func verifyIndexFileIntegrity(indexFile *os.File) (indexSize int64, err error) { @@ -55,13 +85,82 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err return } -func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) { - n := new(needle.Needle) +func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size Size) (lastAppendAtNs uint64, err error) { + n, _, _, err := needle.ReadNeedleHeader(datFile, v, offset) + if err == io.EOF { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("read %s at %d", datFile.Name(), offset) + } + if n.Size != size { + return 0, ErrorSizeMismatch + } + if v == needle.Version3 { + bytes := make([]byte, TimestampSize) + _, err = datFile.ReadAt(bytes, offset+NeedleHeaderSize+int64(size)+needle.NeedleChecksumSize) + if err == io.EOF { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("verifyNeedleIntegrity check %s entry offset %d size %d: %v", datFile.Name(), offset, size, err) + } + n.AppendAtNs = util.BytesToUint64(bytes) + fileTailOffset := offset + needle.GetActualSize(size, v) + fileSize, _, err := datFile.GetStat() + if err != nil { + return 0, fmt.Errorf("stat file %s: %v", datFile.Name(), err) + } + if fileSize == fileTailOffset { + return n.AppendAtNs, nil + } + if fileSize > fileTailOffset { + glog.Warningf("Truncate %s from %d bytes to %d bytes!", datFile.Name(), fileSize, fileTailOffset) + err = datFile.Truncate(fileTailOffset) + if err == nil { + return n.AppendAtNs, nil + } + return n.AppendAtNs, fmt.Errorf("truncate file %s: %v", datFile.Name(), err) + } + glog.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset) + } if err = n.ReadData(datFile, offset, size, v); err != nil { - return n.AppendAtNs, err + return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err) + } + if n.Id != key { + return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id) + } + return 
n.AppendAtNs, err
+}
+
+func verifyDeletedNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, key NeedleId) (lastAppendAtNs uint64, err error) {
+	n := new(needle.Needle)
+	size := n.DiskSize(v)
+	var fileSize int64
+	fileSize, _, err = datFile.GetStat()
+	if err != nil {
+		return 0, fmt.Errorf("GetStat: %v", err)
+	}
+	if err = n.ReadData(datFile, fileSize-size, Size(0), v); err != nil {
+		return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", fileSize-size, size, err)
 	}
 	if n.Id != key {
 		return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
 	}
 	return n.AppendAtNs, err
 }
+
+func (v *Volume) checkIdxFile() error {
+	datFileSize, _, err := v.DataBackend.GetStat()
+	if err != nil {
+		return fmt.Errorf("get stat %s: %v", v.FileName(".dat"), err)
+	}
+	if datFileSize <= super_block.SuperBlockSize {
+		return nil
+	}
+	indexFileName := v.FileName(".idx")
+	if util.FileExists(indexFileName) {
+		return nil
+	}
+	return fmt.Errorf("idx file %s does not exist", indexFileName)
+}
diff --git a/weed/storage/volume_create.go b/weed/storage/volume_create.go
deleted file mode 100644
index ffcb246a4..000000000
--- a/weed/storage/volume_create.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build !linux,!windows
-
-package storage
-
-import (
-	"os"
-
-	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/storage/backend"
-)
-
-func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
-	file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-	if e != nil {
-		return nil, e
-	}
-	if preallocate > 0 {
-		glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
-	}
-	return backend.NewDiskFile(file), nil
-}
diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go
index 313818cde..9c64c9682 100644
--- a/weed/storage/volume_info.go
+++ b/weed/storage/volume_info.go
@@ -14,6 +14,7 @@ type VolumeInfo struct {
 	Size             uint64
 	ReplicaPlacement *super_block.ReplicaPlacement
 	Ttl              *needle.TTL
+	DiskType         string
 	Collection       string
 	Version          needle.Version
 	FileCount        int
@@ -40,6 +41,7 @@ func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err er
 		ModifiedAtSecond:  m.ModifiedAtSecond,
 		RemoteStorageName: m.RemoteStorageName,
 		RemoteStorageKey:  m.RemoteStorageKey,
+		DiskType:          m.DiskType,
 	}
 	rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
 	if e != nil {
@@ -62,6 +64,7 @@ func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi Volu
 	}
 	vi.ReplicaPlacement = rp
 	vi.Ttl = needle.LoadTTLFromUint32(m.Ttl)
+	vi.DiskType = m.DiskType
 	return vi, nil
 }
@@ -90,6 +93,7 @@ func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMe
 		ModifiedAtSecond:  vi.ModifiedAtSecond,
 		RemoteStorageName: vi.RemoteStorageName,
 		RemoteStorageKey:  vi.RemoteStorageKey,
+		DiskType:          vi.DiskType,
 	}
 }
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index fa1f7d617..0cf603ad8 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -14,7 +14,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, err error) {
+func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind) (v *Volume, err error) {
	v = &Volume{dir: dirname, Collection: collection, Id: id}
	v.SuperBlock =
super_block.SuperBlock{} v.needleMapKind = needleMapKind @@ -22,31 +22,42 @@ func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeI return } -func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) (err error) { - fileName := v.FileName() +func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapKind, preallocate int64) (err error) { alreadyHasSuperBlock := false - if !v.maybeLoadVolumeInfo() { - v.SaveVolumeInfo() - } + hasLoadedVolume := false + defer func() { + if !hasLoadedVolume { + if v.nm != nil { + v.nm.Close() + v.nm = nil + } + if v.DataBackend != nil { + v.DataBackend.Close() + v.DataBackend = nil + } + } + }() + + hasVolumeInfoFile := v.maybeLoadVolumeInfo() if v.HasRemoteFile() { v.noWriteCanDelete = true v.noWriteOrDelete = false - glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files) + glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo) v.LoadRemoteFile() alreadyHasSuperBlock = true - } else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(fileName + ".dat"); exists { + } else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(v.FileName(".dat")); exists { // open dat file if !canRead { - return fmt.Errorf("cannot read Volume Data file %s.dat", fileName) + return fmt.Errorf("cannot read Volume Data file %s", v.FileName(".dat")) } var dataFile *os.File if canWrite { - dataFile, err = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644) + dataFile, err = os.OpenFile(v.FileName(".dat"), os.O_RDWR|os.O_CREATE, 0644) } else { - glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode") - dataFile, err = os.Open(fileName + ".dat") + glog.V(0).Infof("opening %s in READONLY mode", v.FileName(".dat")) + dataFile, err = os.Open(v.FileName(".dat")) v.noWriteOrDelete = true } v.lastModifiedTsSeconds = uint64(modifiedTime.Unix()) @@ -56,92 +67,117 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind v.DataBackend = backend.NewDiskFile(dataFile) } else { if createDatIfMissing { - v.DataBackend, err = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb) + v.DataBackend, err = backend.CreateVolumeFile(v.FileName(".dat"), preallocate, v.MemoryMapMaxSizeMb) } else { - return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName) + return fmt.Errorf("volume data file %s does not exist", v.FileName(".dat")) } } if err != nil { if !os.IsPermission(err) { - return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, err) + return fmt.Errorf("cannot load volume data %s: %v", v.FileName(".dat"), err) } else { - return fmt.Errorf("load data file %s.dat: %v", fileName, err) + return fmt.Errorf("load data file %s: %v", v.FileName(".dat"), err) } } if alreadyHasSuperBlock { err = v.readSuperBlock() + glog.V(0).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version) + if v.HasRemoteFile() { + // maybe temporary network problem + glog.Errorf("readSuperBlock remote volume %d: %v", v.Id, err) + err = nil + } } else { if !v.SuperBlock.Initialized() { - return fmt.Errorf("volume %s.dat not initialized", fileName) + return fmt.Errorf("volume %s not initialized", v.FileName(".dat")) } err = v.maybeWriteSuperBlock() } if err == nil && alsoLoadIndex { + // adjust for existing volumes with .idx together with .dat files + if v.dirIdx != v.dir { + if util.FileExists(v.DataFileName() + ".idx") { + v.dirIdx = v.dir + } + } + // 
check volume idx files + if err := v.checkIdxFile(); err != nil { + glog.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err) + } var indexFile *os.File if v.noWriteOrDelete { - glog.V(0).Infoln("open to read file", fileName+".idx") - if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); err != nil { - return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, err) + glog.V(0).Infoln("open to read file", v.FileName(".idx")) + if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644); err != nil { + return fmt.Errorf("cannot read Volume Index %s: %v", v.FileName(".idx"), err) } } else { - glog.V(1).Infoln("open to write file", fileName+".idx") - if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil { - return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, err) + glog.V(1).Infoln("open to write file", v.FileName(".idx")) + if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDWR|os.O_CREATE, 0644); err != nil { + return fmt.Errorf("cannot write Volume Index %s: %v", v.FileName(".idx"), err) } } - if v.lastAppendAtNs, err = CheckVolumeDataIntegrity(v, indexFile); err != nil { + if v.lastAppendAtNs, err = CheckAndFixVolumeDataIntegrity(v, indexFile); err != nil { v.noWriteOrDelete = true glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err) } if v.noWriteOrDelete || v.noWriteCanDelete { - if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil { - glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err) + if v.nm, err = NewSortedFileNeedleMap(v.IndexFileName(), indexFile); err != nil { + glog.V(0).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err) } } else { switch needleMapKind { case NeedleMapInMemory: - glog.V(0).Infoln("loading index", fileName+".idx", "to memory") + glog.V(0).Infoln("loading index", v.FileName(".idx"), "to memory") if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil { - glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", err) + glog.V(0).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err) } case NeedleMapLevelDb: - glog.V(0).Infoln("loading leveldb", fileName+".ldb") + glog.V(0).Infoln("loading leveldb", v.FileName(".ldb")) opts := &opt.Options{ BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB CompactionTableSizeMultiplier: 10, // default value is 1 } - if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) } case NeedleMapLevelDbMedium: - glog.V(0).Infoln("loading leveldb medium", fileName+".ldb") + glog.V(0).Infoln("loading leveldb medium", v.FileName(".ldb")) opts := &opt.Options{ BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB CompactionTableSizeMultiplier: 10, // default value is 1 } - if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) } case NeedleMapLevelDbLarge: - glog.V(0).Infoln("loading leveldb large", 
fileName+".ldb") + glog.V(0).Infoln("loading leveldb large", v.FileName(".ldb")) opts := &opt.Options{ BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB CompactionTableSizeMultiplier: 10, // default value is 1 } - if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) } } } } + if !hasVolumeInfoFile { + v.volumeInfo.Version = uint32(v.SuperBlock.Version) + v.SaveVolumeInfo() + } + stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc() + if err == nil { + hasLoadedVolume = true + } + return err } diff --git a/weed/storage/volume_read.go b/weed/storage/volume_read.go new file mode 100644 index 000000000..f689eeec0 --- /dev/null +++ b/weed/storage/volume_read.go @@ -0,0 +1,131 @@ +package storage + +import ( + "fmt" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + . "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +// read fills in Needle content by looking up n.Id from NeedleMapper +func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, error) { + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + + nv, ok := v.nm.Get(n.Id) + if !ok || nv.Offset.IsZero() { + return -1, ErrorNotFound + } + readSize := nv.Size + if readSize.IsDeleted() { + if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize { + glog.V(3).Infof("reading deleted %s", n.String()) + readSize = -readSize + } else { + return -1, ErrorDeleted + } + } + if readSize == 0 { + return 0, nil + } + err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version()) + if err == needle.ErrorSizeMismatch && OffsetSize == 4 { + err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version()) + } + v.checkReadWriteError(err) + if err != nil { + return 0, err + } + bytesRead := len(n.Data) + if !n.HasTtl() { + return bytesRead, nil + } + ttlMinutes := n.Ttl.Minutes() + if ttlMinutes == 0 { + return bytesRead, nil + } + if !n.HasLastModifiedDate() { + return bytesRead, nil + } + if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) { + return bytesRead, nil + } + return -1, ErrorNotFound +} + +// read fills in Needle content by looking up n.Id from NeedleMapper +func (v *Volume) ReadNeedleBlob(offset int64, size Size) ([]byte, error) { + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + + return needle.ReadNeedleBlob(v.DataBackend, offset, size, v.Version()) +} + +type VolumeFileScanner interface { + VisitSuperBlock(super_block.SuperBlock) error + ReadNeedleBody() bool + VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error +} + +func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, + needleMapKind NeedleMapKind, + volumeFileScanner VolumeFileScanner) (err error) { + var v *Volume + if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { + return fmt.Errorf("failed to load volume %d: %v", id, err) + } + if err = 
volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { + return fmt.Errorf("failed to process volume %d super block: %v", id, err) + } + defer v.Close() + + version := v.Version() + + offset := int64(v.SuperBlock.BlockSize()) + + return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner) +} + +func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) { + n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset) + if e != nil { + if e == io.EOF { + return nil + } + return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e) + } + for n != nil { + var needleBody []byte + if volumeFileScanner.ReadNeedleBody() { + // println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest) + if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil { + glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err) + // err = fmt.Errorf("cannot read needle body: %v", err) + // return + } + } + err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody) + if err == io.EOF { + return nil + } + if err != nil { + glog.V(0).Infof("visit needle error: %v", err) + return fmt.Errorf("visit needle error: %v", err) + } + offset += NeedleHeaderSize + rest + glog.V(4).Infof("==> new entry offset %d", offset) + if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err) + } + glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest) + } + return nil +} diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go deleted file mode 100644 index 0aa3f794b..000000000 --- a/weed/storage/volume_read_write.go +++ /dev/null @@ -1,237 +0,0 @@ -package storage - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage/backend" - "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/storage/super_block" - . "github.com/chrislusf/seaweedfs/weed/storage/types" -) - -var ErrorNotFound = errors.New("not found") - -// isFileUnchanged checks whether this needle to write is same as last one. -// It requires serialized access in the same volume. 
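
The deleted volume_read_write.go below is not dropped functionality: this commit splits it into volume_read.go (added above), volume_write.go, and volume_stream_write.go (both further down). ScanVolumeFileFrom, kept in the read half, drives any VolumeFileScanner needle by needle. A hedged sketch of a scanner against that interface, one that merely counts live needles (the tallying itself is illustrative, only the three interface methods come from the code above):

package storage

import (
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

// counterScanner is a minimal VolumeFileScanner: it records the volume
// version from the super block and counts needles that are still live.
type counterScanner struct {
	version needle.Version
	live    int
}

func (s *counterScanner) VisitSuperBlock(sb super_block.SuperBlock) error {
	s.version = sb.Version
	return nil
}

// ReadNeedleBody returns false: headers alone are enough for counting,
// so ScanVolumeFileFrom can skip reading each needle's body.
func (s *counterScanner) ReadNeedleBody() bool { return false }

func (s *counterScanner) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
	if n.Size > 0 && n.Size.IsValid() {
		s.live++
	}
	return nil
}

Wired up via ScanVolumeFile(dir, collection, id, NeedleMapInMemory, &counterScanner{}), the scanner visits every entry in offset order, the same walk the generate-idx scanner above performs.
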
-func (v *Volume) isFileUnchanged(n *needle.Needle) bool { - if v.Ttl.String() != "" { - return false - } - - nv, ok := v.nm.Get(n.Id) - if ok && !nv.Offset.IsZero() && nv.Size != TombstoneFileSize { - oldNeedle := new(needle.Needle) - err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) - if err != nil { - glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err) - return false - } - if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) { - n.DataSize = oldNeedle.DataSize - return true - } - } - return false -} - -// Destroy removes everything related to this volume -func (v *Volume) Destroy() (err error) { - if v.isCompacting { - err = fmt.Errorf("volume %d is compacting", v.Id) - return - } - storageName, storageKey := v.RemoteStorageNameKey() - if v.HasRemoteFile() && storageName != "" && storageKey != "" { - if backendStorage, found := backend.BackendStorages[storageName]; found { - backendStorage.DeleteFile(storageKey) - } - } - v.Close() - os.Remove(v.FileName() + ".dat") - os.Remove(v.FileName() + ".idx") - os.Remove(v.FileName() + ".vif") - os.Remove(v.FileName() + ".sdx") - os.Remove(v.FileName() + ".cpd") - os.Remove(v.FileName() + ".cpx") - os.RemoveAll(v.FileName() + ".ldb") - return -} - -func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) { - glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() - if v.isFileUnchanged(n) { - size = n.DataSize - isUnchanged = true - return - } - - if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL { - n.SetHasTtl() - n.Ttl = v.Ttl - } - - // check whether existing needle cookie matches - nv, ok := v.nm.Get(n.Id) - if ok { - existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset()) - if existingNeedleReadErr != nil { - err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr) - return - } - if existingNeedle.Cookie != n.Cookie { - glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie) - err = fmt.Errorf("mismatching cookie %x", n.Cookie) - return - } - } - - // append to dat file - n.AppendAtNs = uint64(time.Now().UnixNano()) - if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil { - return - } - v.lastAppendAtNs = n.AppendAtNs - - // add to needle map - if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset { - if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil { - glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err) - } - } - if v.lastModifiedTsSeconds < n.LastModified { - v.lastModifiedTsSeconds = n.LastModified - } - return -} - -func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) { - glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() - nv, ok := v.nm.Get(n.Id) - //fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size) - if ok && nv.Size != TombstoneFileSize { - size := nv.Size - n.Data = nil - n.AppendAtNs = uint64(time.Now().UnixNano()) - offset, _, _, err := n.Append(v.DataBackend, v.Version()) - if err != nil { - return size, err - } - v.lastAppendAtNs = n.AppendAtNs - if err = v.nm.Delete(n.Id, 
ToOffset(int64(offset))); err != nil { - return size, err - } - return size, err - } - return 0, nil -} - -// read fills in Needle content by looking up n.Id from NeedleMapper -func (v *Volume) readNeedle(n *needle.Needle) (int, error) { - v.dataFileAccessLock.RLock() - defer v.dataFileAccessLock.RUnlock() - - nv, ok := v.nm.Get(n.Id) - if !ok || nv.Offset.IsZero() { - return -1, ErrorNotFound - } - if nv.Size == TombstoneFileSize { - return -1, errors.New("already deleted") - } - if nv.Size == 0 { - return 0, nil - } - err := n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) - if err != nil { - return 0, err - } - bytesRead := len(n.Data) - if !n.HasTtl() { - return bytesRead, nil - } - ttlMinutes := n.Ttl.Minutes() - if ttlMinutes == 0 { - return bytesRead, nil - } - if !n.HasLastModifiedDate() { - return bytesRead, nil - } - if uint64(time.Now().Unix()) < n.LastModified+uint64(ttlMinutes*60) { - return bytesRead, nil - } - return -1, ErrorNotFound -} - -type VolumeFileScanner interface { - VisitSuperBlock(super_block.SuperBlock) error - ReadNeedleBody() bool - VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error -} - -func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, - needleMapKind NeedleMapType, - volumeFileScanner VolumeFileScanner) (err error) { - var v *Volume - if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { - return fmt.Errorf("failed to load volume %d: %v", id, err) - } - if v.volumeInfo.Version == 0 { - if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { - return fmt.Errorf("failed to process volume %d super block: %v", id, err) - } - } - defer v.Close() - - version := v.Version() - - offset := int64(v.SuperBlock.BlockSize()) - - return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner) -} - -func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) { - n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset) - if e != nil { - if e == io.EOF { - return nil - } - return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e) - } - for n != nil { - var needleBody []byte - if volumeFileScanner.ReadNeedleBody() { - if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil { - glog.V(0).Infof("cannot read needle body: %v", err) - //err = fmt.Errorf("cannot read needle body: %v", err) - //return - } - } - err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody) - if err == io.EOF { - return nil - } - if err != nil { - glog.V(0).Infof("visit needle error: %v", err) - return fmt.Errorf("visit needle error: %v", err) - } - offset += NeedleHeaderSize + rest - glog.V(4).Infof("==> new entry offset %d", offset) - if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { - if err == io.EOF { - return nil - } - return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err) - } - glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest) - } - return nil -} diff --git a/weed/storage/volume_stream_write.go b/weed/storage/volume_stream_write.go new file mode 100644 index 000000000..d229bdf20 --- /dev/null +++ b/weed/storage/volume_stream_write.go @@ -0,0 +1,104 @@ +package storage + +import ( + "bufio" + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "time" + + 
"github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + . "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func (v *Volume) StreamWrite(n *needle.Needle, data io.Reader, dataSize uint32) (err error) { + + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() + + df, ok := v.DataBackend.(*backend.DiskFile) + if !ok { + return fmt.Errorf("unexpected volume backend") + } + offset, _, _ := v.DataBackend.GetStat() + + header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation + CookieToBytes(header[0:CookieSize], n.Cookie) + NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id) + n.Size = 4 + Size(dataSize) + 1 + SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size) + + n.DataSize = dataSize + + // needle header + df.Write(header[0:NeedleHeaderSize]) + + // data size and data + util.Uint32toBytes(header[0:4], n.DataSize) + df.Write(header[0:4]) + // write and calculate CRC + crcWriter := needle.NewCRCwriter(df) + io.Copy(crcWriter, io.LimitReader(data, int64(dataSize))) + + // flags + util.Uint8toBytes(header[0:1], n.Flags) + df.Write(header[0:1]) + + // data checksum + util.Uint32toBytes(header[0:needle.NeedleChecksumSize], crcWriter.Sum()) + // write timestamp, padding + n.AppendAtNs = uint64(time.Now().UnixNano()) + util.Uint64toBytes(header[needle.NeedleChecksumSize:needle.NeedleChecksumSize+TimestampSize], n.AppendAtNs) + padding := needle.PaddingLength(n.Size, needle.Version3) + df.Write(header[0 : needle.NeedleChecksumSize+TimestampSize+padding]) + + // add to needle map + if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil { + glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err) + } + return +} + +func (v *Volume) StreamRead(n *needle.Needle, writer io.Writer) (err error) { + + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() + + nv, ok := v.nm.Get(n.Id) + if !ok || nv.Offset.IsZero() { + return ErrorNotFound + } + + sr := &StreamReader{ + readerAt: v.DataBackend, + offset: nv.Offset.ToActualOffset(), + } + bufReader := bufio.NewReader(sr) + bufReader.Discard(NeedleHeaderSize) + sizeBuf := make([]byte, 4) + bufReader.Read(sizeBuf) + if _, err = writer.Write(sizeBuf); err != nil { + return err + } + dataSize := util.BytesToUint32(sizeBuf) + + _, err = io.Copy(writer, io.LimitReader(bufReader, int64(dataSize))) + + return +} + +type StreamReader struct { + offset int64 + readerAt io.ReaderAt +} + +func (sr *StreamReader) Read(p []byte) (n int, err error) { + n, err = sr.readerAt.ReadAt(p, sr.offset) + if err != nil { + return + } + sr.offset += int64(n) + return +} diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index 61c09d85a..20223ac1b 100644 --- a/weed/storage/volume_super_block.go +++ b/weed/storage/volume_super_block.go @@ -1,6 +1,7 @@ package storage import ( + "fmt" "os" "github.com/chrislusf/seaweedfs/weed/glog" @@ -25,8 +26,10 @@ func (v *Volume) maybeWriteSuperBlock() error { if dataFile, e = os.Create(v.DataBackend.Name()); e == nil { v.DataBackend = backend.NewDiskFile(dataFile) if _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0); e == nil { + v.noWriteLock.Lock() v.noWriteOrDelete = false v.noWriteCanDelete = false + v.noWriteLock.Unlock() } } } @@ -36,5 +39,12 @@ func (v *Volume) maybeWriteSuperBlock() error { func (v *Volume) readSuperBlock() (err error) { v.SuperBlock, 
err = super_block.ReadSuperBlock(v.DataBackend)
+	if v.volumeInfo != nil && v.volumeInfo.Replication != "" {
+		if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil {
+			return fmt.Errorf("error parsing volume %d replication %s: %v", v.Id, v.volumeInfo.Replication, err)
+		} else {
+			v.SuperBlock.ReplicaPlacement = replication
+		}
+	}
 	return err
 }
diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go
index 99071285f..23160906b 100644
--- a/weed/storage/volume_tier.go
+++ b/weed/storage/volume_tier.go
@@ -6,6 +6,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
 func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
@@ -14,12 +15,21 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) {
 
-	v.volumeInfo, found = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif")
+	var err error
+	v.volumeInfo, v.hasRemoteFile, found, err = pb.MaybeLoadVolumeInfo(v.FileName(".vif"))
 
-	if found {
+	if v.volumeInfo.Version == 0 {
+		v.volumeInfo.Version = uint32(needle.CurrentVersion)
+	}
+
+	if v.hasRemoteFile {
 		glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key)
-		v.hasRemoteFile = true
+	}
+
+	if err != nil {
+		glog.Warningf("load volume %d.vif file: %v", v.Id, err)
+		return
+	}
 
 	return
@@ -44,7 +54,7 @@ func (v *Volume) LoadRemoteFile() error {
 
 func (v *Volume) SaveVolumeInfo() error {
 
-	tierFileName := v.FileName() + ".vif"
+	tierFileName := v.FileName(".vif")
 
 	return pb.SaveVolumeInfo(tierFileName, v.volumeInfo)
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index 434b5989d..be84f8a13 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -3,6 +3,7 @@ package storage
 import (
 	"fmt"
 	"os"
+	"runtime"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -48,15 +49,20 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
 		v.isCompacting = false
 	}()
 
-	filePath := v.FileName()
 	v.lastCompactIndexOffset = v.IndexFileSize()
 	v.lastCompactRevision = v.SuperBlock.CompactionRevision
 	glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
-	return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
+	if err := v.DataBackend.Sync(); err != nil {
+		glog.V(0).Infof("compact failed to sync volume %d", v.Id)
+	}
+	if err := v.nm.Sync(); err != nil {
+		glog.V(0).Infof("compact failed to sync volume idx %d", v.Id)
+	}
+	return v.copyDataAndGenerateIndexFile(v.FileName(".cpd"), v.FileName(".cpx"), preallocate, compactionBytePerSecond)
 }
 
 // compact a volume based on deletions in .idx files
-func (v *Volume) Compact2(preallocate int64) error {
+func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) error {
 	if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
 		return nil
@@ -68,11 +74,16 @@ func (v *Volume) Compact2(preallocate int64) error {
 		v.isCompacting = false
 	}()
 
-	filePath := v.FileName()
 	v.lastCompactIndexOffset = v.IndexFileSize()
 	v.lastCompactRevision = v.SuperBlock.CompactionRevision
	glog.V(3).Infof("creating copies for volume %d ...", v.Id)
-	return v.copyDataBasedOnIndexFile(filePath+".cpd",
filePath+".cpx", preallocate) + if err := v.DataBackend.Sync(); err != nil { + glog.V(0).Infof("compact2 fail to sync volume dat %d: %v", v.Id, err) + } + if err := v.nm.Sync(); err != nil { + glog.V(0).Infof("compact2 fail to sync volume idx %d: %v", v.Id, err) + } + return copyDataBasedOnIndexFile(v.FileName(".dat"), v.FileName(".idx"), v.FileName(".cpd"), v.FileName(".cpx"), v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond) } func (v *Volume) CommitCompact() error { @@ -91,38 +102,49 @@ func (v *Volume) CommitCompact() error { glog.V(3).Infof("Got volume %d committing lock...", v.Id) v.nm.Close() - if err := v.DataBackend.Close(); err != nil { - glog.V(0).Infof("fail to close volume %d", v.Id) + if v.DataBackend != nil { + if err := v.DataBackend.Close(); err != nil { + glog.V(0).Infof("fail to close volume %d", v.Id) + } } v.DataBackend = nil stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() var e error - if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { + if e = v.makeupDiff(v.FileName(".cpd"), v.FileName(".cpx"), v.FileName(".dat"), v.FileName(".idx")); e != nil { glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) - e = os.Remove(v.FileName() + ".cpd") + e = os.Remove(v.FileName(".cpd")) if e != nil { return e } - e = os.Remove(v.FileName() + ".cpx") + e = os.Remove(v.FileName(".cpx")) if e != nil { return e } } else { + if runtime.GOOS == "windows" { + e = os.RemoveAll(v.FileName(".dat")) + if e != nil { + return e + } + e = os.RemoveAll(v.FileName(".idx")) + if e != nil { + return e + } + } var e error - if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { - return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e) + if e = os.Rename(v.FileName(".cpd"), v.FileName(".dat")); e != nil { + return fmt.Errorf("rename %s: %v", v.FileName(".cpd"), e) } - if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { - return fmt.Errorf("rename %s: %v", v.FileName()+".cpx", e) + if e = os.Rename(v.FileName(".cpx"), v.FileName(".idx")); e != nil { + return fmt.Errorf("rename %s: %v", v.FileName(".cpx"), e) } } //glog.V(3).Infof("Pretending to be vacuuming...") //time.Sleep(20 * time.Second) - os.RemoveAll(v.FileName() + ".ldb") - os.RemoveAll(v.FileName() + ".bdb") + os.RemoveAll(v.FileName(".ldb")) glog.V(3).Infof("Loading volume %d commit file...", v.Id) if e = v.load(true, false, v.needleMapKind, 0); e != nil { @@ -134,8 +156,8 @@ func (v *Volume) CommitCompact() error { func (v *Volume) cleanupCompact() error { glog.V(0).Infof("Cleaning up volume %d vacuuming...", v.Id) - e1 := os.Remove(v.FileName() + ".cpd") - e2 := os.Remove(v.FileName() + ".cpx") + e1 := os.Remove(v.FileName(".cpd")) + e2 := os.Remove(v.FileName(".cpx")) if e1 != nil { return e1 } @@ -158,9 +180,15 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI var indexSize int64 oldIdxFile, err := os.Open(oldIdxFileName) + if err != nil { + return fmt.Errorf("makeupDiff open %s failed: %v", oldIdxFileName, err) + } defer oldIdxFile.Close() oldDatFile, err := os.Open(oldDatFileName) + if err != nil { + return fmt.Errorf("makeupDiff open %s failed: %v", oldDatFileName, err) + } oldDatBackend := backend.NewDiskFile(oldDatFile) defer oldDatBackend.Close() @@ -183,7 +211,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI type keyField struct { offset Offset - size uint32 + size Size } 
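
keyField's size member switches from uint32 to the new Size type here, part of a commit-wide change: deletion is now encoded in the size value itself, which is what the IsDeleted and IsValid calls in these hunks test. A rough model of that convention, assuming a signed 32-bit alias with a reserved tombstone value (an inference from the checks visible in this diff, not a verbatim copy of weed/storage/types):

package types

// Size carries a needle's payload size; negative values mark deletions.
type Size int32

// TombstoneFileSize is the deletion marker kept in .idx entries
// (assumed to be -1 in this sketch).
const TombstoneFileSize = Size(-1)

// IsDeleted matches checks like size.IsDeleted() above: tombstones and
// any negated size count as deleted.
func (s Size) IsDeleted() bool { return s < 0 || s == TombstoneFileSize }

// IsValid is the live-needle test used when rebuilding the needle map.
func (s Size) IsValid() bool { return s > 0 && s != TombstoneFileSize }

Under this scheme, readNeedle's readSize = -readSize step in volume_read.go above recovers the original length of a deleted but still readable needle.
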
incrementedHasUpdatedIndexEntry := make(map[NeedleId]keyField) @@ -250,15 +278,15 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI } //updated needle - if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize { + if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size.IsValid() { //even the needle cache in memory is hit, the need_bytes is correct - glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size) + glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size) var needleBytes []byte - needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version()) + needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, v.Version()) if err != nil { - return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, err) + return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, err) } - dst.Write(needleBytes) + dstDatBackend.Write(needleBytes) util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize)) } else { //deleted needle //fakeDelNeedle 's default Data field is nil @@ -311,7 +339,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in } nv, ok := scanner.v.nm.Get(n.Id) glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size != TombstoneFileSize { + if ok && nv.Offset.ToActualOffset() == offset && nv.Size > 0 && nv.Size.IsValid() { if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } @@ -330,12 +358,13 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca var ( dst backend.BackendStorageFile ) - if dst, err = createVolumeFile(dstName, preallocate, 0); err != nil { + if dst, err = backend.CreateVolumeFile(dstName, preallocate, 0); err != nil { return } defer dst.Close() nm := needle_map.NewMemDb() + defer nm.Close() scanner := &VolumeFileScanner4Vacuum{ v: v, @@ -353,64 +382,70 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca return } -func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string, preallocate int64) (err error) { +func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64, compactionBytePerSecond int64) (err error) { var ( - dstDatBackend backend.BackendStorageFile - oldIndexFile *os.File + srcDatBackend, dstDatBackend backend.BackendStorageFile + dataFile *os.File ) - if dstDatBackend, err = createVolumeFile(dstName, preallocate, 0); err != nil { + if dstDatBackend, err = backend.CreateVolumeFile(dstDatName, preallocate, 0); err != nil { return } defer dstDatBackend.Close() - if oldIndexFile, err = os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644); err != nil { + oldNm := needle_map.NewMemDb() + defer oldNm.Close() + newNm := needle_map.NewMemDb() + defer newNm.Close() + if err = oldNm.LoadFromIdx(srcIdxName); err != nil { return } - defer oldIndexFile.Close() - - nm := 
needle_map.NewMemDb() + if dataFile, err = os.Open(srcDatName); err != nil { + return err + } + srcDatBackend = backend.NewDiskFile(dataFile) + defer srcDatBackend.Close() now := uint64(time.Now().Unix()) - v.SuperBlock.CompactionRevision++ - dstDatBackend.WriteAt(v.SuperBlock.Bytes(), 0) - newOffset := int64(v.SuperBlock.BlockSize()) + sb.CompactionRevision++ + dstDatBackend.WriteAt(sb.Bytes(), 0) + newOffset := int64(sb.BlockSize()) - idx2.WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error { - if offset.IsZero() || size == TombstoneFileSize { - return nil - } + writeThrottler := util.NewWriteThrottler(compactionBytePerSecond) + + oldNm.AscendingVisit(func(value needle_map.NeedleValue) error { + + offset, size := value.Offset, value.Size - nv, ok := v.nm.Get(key) - if !ok { + if offset.IsZero() || size.IsDeleted() { return nil } n := new(needle.Needle) - err := n.ReadData(v.DataBackend, offset.ToAcutalOffset(), size, v.Version()) + err := n.ReadData(srcDatBackend, offset.ToActualOffset(), size, version) if err != nil { return nil } - if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) { + if n.HasTtl() && now >= n.LastModified+uint64(sb.Ttl.Minutes()*60) { return nil } - glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if nv.Offset == offset && nv.Size > 0 { - if err = nm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { - return fmt.Errorf("cannot put needle: %s", err) - } - if _, _, _, err = n.Append(dstDatBackend, v.Version()); err != nil { - return fmt.Errorf("cannot append needle: %s", err) - } - newOffset += n.DiskSize(v.Version()) - glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + if err = newNm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { + return fmt.Errorf("cannot put needle: %s", err) } + if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil { + return fmt.Errorf("cannot append needle: %s", err) + } + delta := n.DiskSize(version) + newOffset += delta + writeThrottler.MaybeSlowdown(delta) + glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + return nil }) - nm.SaveToIdx(idxName) + newNm.SaveToIdx(datIdxName) return } diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go index 95f43d6ec..cd5a4f430 100644 --- a/weed/storage/volume_vacuum_test.go +++ b/weed/storage/volume_vacuum_test.go @@ -69,7 +69,7 @@ func TestCompaction(t *testing.T) { } defer os.RemoveAll(dir) // clean up - v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0) + v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0) if err != nil { t.Fatalf("volume creation: %v", err) } @@ -84,7 +84,7 @@ func TestCompaction(t *testing.T) { } startTime := time.Now() - v.Compact2(0) + v.Compact2(0, 0) speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds() t.Logf("compaction speed: %.2f bytes/s", speed) @@ -96,7 +96,7 @@ func TestCompaction(t *testing.T) { v.Close() - v, err = NewVolume(dir, "", 1, NeedleMapInMemory, nil, nil, 0, 0) + v, err = NewVolume(dir, dir, "", 1, NeedleMapInMemory, nil, nil, 0, 0) if err != nil { t.Fatalf("volume reloading: %v", err) } @@ -113,11 +113,11 @@ func TestCompaction(t *testing.T) { } n := newEmptyNeedle(uint64(i)) - size, err := v.readNeedle(n) + size, err := v.readNeedle(n, nil) if err != nil { t.Fatalf("read file %d: %v", i, err) } - if 
infos[i-1].size != uint32(size) { + if infos[i-1].size != types.Size(size) { t.Fatalf("read file %d size mismatch expected %d found %d", i, infos[i-1].size, size) } if infos[i-1].crc != n.Checksum { @@ -129,7 +129,7 @@ func TestCompaction(t *testing.T) { } func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) { n := newRandomNeedle(uint64(i)) - _, size, _, err := v.writeNeedle(n) + _, size, _, err := v.writeNeedle2(n, false) if err != nil { t.Fatalf("write file %d: %v", i, err) } @@ -141,7 +141,7 @@ func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) { if rand.Float64() < 0.03 { toBeDeleted := rand.Intn(i) + 1 oldNeedle := newEmptyNeedle(uint64(toBeDeleted)) - v.deleteNeedle(oldNeedle) + v.deleteNeedle2(oldNeedle) // println("deleted file", toBeDeleted) infos[toBeDeleted-1] = &needleInfo{ size: 0, @@ -151,7 +151,7 @@ func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) { } type needleInfo struct { - size uint32 + size types.Size crc needle.CRC } diff --git a/weed/storage/volume_write.go b/weed/storage/volume_write.go new file mode 100644 index 000000000..a286c5dd5 --- /dev/null +++ b/weed/storage/volume_write.go @@ -0,0 +1,327 @@ +package storage + +import ( + "bytes" + "errors" + "fmt" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + . "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +var ErrorNotFound = errors.New("not found") +var ErrorDeleted = errors.New("already deleted") +var ErrorSizeMismatch = errors.New("size mismatch") + +func (v *Volume) checkReadWriteError(err error) { + if err == nil { + if v.lastIoError != nil { + v.lastIoError = nil + } + return + } + if err.Error() == "input/output error" { + v.lastIoError = err + } +} + +// isFileUnchanged checks whether this needle to write is same as last one. +// It requires serialized access in the same volume. 
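
The comment above describes the write-path dedup that the new volume_write.go keeps: before appending, the volume re-reads the stored needle and, if a retried upload is byte-identical, acknowledges it without growing the .dat file. The comparison reduces to cookie, checksum, then a full payload compare; a standalone sketch with a cut-down needle stand-in (the struct is illustrative, not the real needle.Needle):

package main

import (
	"bytes"
	"fmt"
)

// storedNeedle is a cut-down stand-in for needle.Needle.
type storedNeedle struct {
	Cookie   uint32
	Checksum uint32
	Data     []byte
}

// unchanged mirrors isFileUnchanged's test: cheap fields first, then
// the full payload; only an exact match may skip the append.
func unchanged(old, cur storedNeedle) bool {
	return old.Cookie == cur.Cookie &&
		old.Checksum == cur.Checksum &&
		bytes.Equal(old.Data, cur.Data)
}

func main() {
	a := storedNeedle{Cookie: 1, Checksum: 0xdead, Data: []byte("x")}
	fmt.Println(unchanged(a, a)) // true: an identical retry is a no-op
}

Note the guard in the real code below: volumes with a TTL never dedup, presumably because even an identical payload written later must refresh its expiry.
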
+func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
+	if v.Ttl.String() != "" {
+		return false
+	}
+
+	nv, ok := v.nm.Get(n.Id)
+	if ok && !nv.Offset.IsZero() && nv.Size.IsValid() {
+		oldNeedle := new(needle.Needle)
+		err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), nv.Size, v.Version())
+		if err != nil {
+			glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err)
+			return false
+		}
+		if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
+			n.DataSize = oldNeedle.DataSize
+			return true
+		}
+	}
+	return false
+}
+
+// Destroy removes everything related to this volume
+func (v *Volume) Destroy() (err error) {
+	if v.isCompacting {
+		err = fmt.Errorf("volume %d is compacting", v.Id)
+		return
+	}
+	close(v.asyncRequestsChan)
+	storageName, storageKey := v.RemoteStorageNameKey()
+	if v.HasRemoteFile() && storageName != "" && storageKey != "" {
+		if backendStorage, found := backend.BackendStorages[storageName]; found {
+			backendStorage.DeleteFile(storageKey)
+		}
+	}
+	v.Close()
+	removeVolumeFiles(v.DataFileName())
+	removeVolumeFiles(v.IndexFileName())
+	return
+}
+
+func removeVolumeFiles(filename string) {
+	// basic
+	os.Remove(filename + ".dat")
+	os.Remove(filename + ".idx")
+	os.Remove(filename + ".vif")
+	// sorted index file
+	os.Remove(filename + ".sdx")
+	// compaction
+	os.Remove(filename + ".cpd")
+	os.Remove(filename + ".cpx")
+	// leveldb index file
+	os.RemoveAll(filename + ".ldb")
+	// marker for damaged or incomplete volume
+	os.Remove(filename + ".note")
+}
+
+func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
+	v.asyncRequestsChan <- request
+}
+
+func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
+	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	actualSize := needle.GetActualSize(Size(len(n.Data)), v.Version())
+
+	v.dataFileAccessLock.Lock()
+	defer v.dataFileAccessLock.Unlock()
+
+	if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
+		err = fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
+		return
+	}
+
+	return v.doWriteRequest(n)
+}
+
+func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) {
+	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
+		n.SetHasTtl()
+		n.Ttl = v.Ttl
+	}
+
+	if !fsync {
+		return v.syncWrite(n)
+	} else {
+		asyncRequest := needle.NewAsyncRequest(n, true)
+		// using len(n.Data) here instead of n.Size, since n.Size is only populated in n.Append()
+		asyncRequest.ActualSize = needle.GetActualSize(Size(len(n.Data)), v.Version())
+
+		v.asyncRequestAppend(asyncRequest)
+		offset, _, isUnchanged, err = asyncRequest.WaitComplete()
+
+		return
+	}
+}
+
+func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
+	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	if v.isFileUnchanged(n) {
+		size = Size(n.DataSize)
+		isUnchanged = true
+		return
+	}
+
+	// check whether existing needle cookie matches
+	nv, ok := v.nm.Get(n.Id)
+	if ok {
+		existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset())
+		if existingNeedleReadErr != nil {
+			err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
+			return
+		}
+		if existingNeedle.Cookie != n.Cookie {
+			glog.V(0).Infof("write cookie mismatch: existing %s, new %s",
+				needle.NewFileIdFromNeedle(v.Id, existingNeedle), needle.NewFileIdFromNeedle(v.Id, n))
+			err = fmt.Errorf("mismatching cookie %x", n.Cookie)
+			return
+		}
+	}
+
+	// append to dat file
+	n.AppendAtNs = uint64(time.Now().UnixNano())
+	offset, size, _, err = n.Append(v.DataBackend, v.Version())
+	v.checkReadWriteError(err)
+	if err != nil {
+		return
+	}
+	v.lastAppendAtNs = n.AppendAtNs
+
+	// add to needle map
+	if !ok || uint64(nv.Offset.ToActualOffset()) < offset {
+		if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
+			glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+		}
+	}
+	if v.lastModifiedTsSeconds < n.LastModified {
+		v.lastModifiedTsSeconds = n.LastModified
+	}
+	return
+}
+
+func (v *Volume) syncDelete(n *needle.Needle) (Size, error) {
+	// glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	actualSize := needle.GetActualSize(0, v.Version())
+	v.dataFileAccessLock.Lock()
+	defer v.dataFileAccessLock.Unlock()
+
+	if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
+		err := fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
+		return 0, err
+	}
+
+	return v.doDeleteRequest(n)
+}
+
+func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) {
+	// todo: delete info is always appended without fsync; it may need fsync in the future
+	fsync := false
+
+	if !fsync {
+		return v.syncDelete(n)
+	} else {
+		asyncRequest := needle.NewAsyncRequest(n, false)
+		asyncRequest.ActualSize = needle.GetActualSize(0, v.Version())
+
+		v.asyncRequestAppend(asyncRequest)
+		_, size, _, err := asyncRequest.WaitComplete()
+
+		return Size(size), err
+	}
+}
+
+func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) {
+	glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+	nv, ok := v.nm.Get(n.Id)
+	// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
+	if ok && nv.Size.IsValid() {
+		size := nv.Size
+		n.Data = nil
+		n.AppendAtNs = uint64(time.Now().UnixNano())
+		offset, _, _, err := n.Append(v.DataBackend, v.Version())
+		v.checkReadWriteError(err)
+		if err != nil {
+			return size, err
+		}
+		v.lastAppendAtNs = n.AppendAtNs
+		if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
+			return size, err
+		}
+		return size, err
+	}
+	return 0, nil
+}
+
+func (v *Volume) startWorker() {
+	go func() {
+		chanClosed := false
+		for {
+			// chan closed; the goroutine will exit
+			if chanClosed {
+				break
+			}
+			currentRequests := make([]*needle.AsyncRequest, 0, 128)
+			currentBytesToWrite := int64(0)
+			for {
+				request, ok := <-v.asyncRequestsChan
+				// volume may be closed
+				if !ok {
+					chanClosed = true
+					break
+				}
+				if MaxPossibleVolumeSize < v.ContentSize()+uint64(currentBytesToWrite+request.ActualSize) {
+					request.Complete(0, 0, false,
+						fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize()))
+					break
+				}
+				currentRequests = append(currentRequests, request)
+				currentBytesToWrite += request.ActualSize
+				// submit at most 4M bytes or 128 requests at one time to decrease request delay.
+				// it also needs to break if there is no data in the channel, to avoid an io hang.
+				if currentBytesToWrite >= 4*1024*1024 || len(currentRequests) >= 128 || len(v.asyncRequestsChan) == 0 {
+					break
+				}
+			}
+			if len(currentRequests) == 0 {
+				continue
+			}
+			v.dataFileAccessLock.Lock()
+			end, _, e := v.DataBackend.GetStat()
+			if e != nil {
+				for i := 0; i < len(currentRequests); i++ {
+					currentRequests[i].Complete(0, 0, false,
+						fmt.Errorf("cannot read current volume position: %v", e))
+				}
+				v.dataFileAccessLock.Unlock()
+				continue
+			}
+
+			for i := 0; i < len(currentRequests); i++ {
+				if currentRequests[i].IsWriteRequest {
+					offset, size, isUnchanged, err := v.doWriteRequest(currentRequests[i].N)
+					currentRequests[i].UpdateResult(offset, uint64(size), isUnchanged, err)
+				} else {
+					size, err := v.doDeleteRequest(currentRequests[i].N)
+					currentRequests[i].UpdateResult(0, uint64(size), false, err)
+				}
+			}
+
+			// if sync fails, the data is not reliable; mark the completed requests as failed and roll back
+			if err := v.DataBackend.Sync(); err != nil {
+				// todo: this may generate dirty data or cause inconsistency; maybe weed needs to panic?
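
startWorker, added above, amortizes fsync cost through group commit: drain up to 128 requests or roughly 4MB from asyncRequestsChan, apply them all, call Sync once, and on sync failure truncate back to the pre-batch offset and fail the batch, which is the rollback the todo above worries about. The draining shape, isolated as a hedged sketch with each request reduced to just its byte size:

package main

import "fmt"

// drainBatch gathers queued sizes until a byte or count cap is hit, or
// the channel momentarily empties, mirroring the inner loop above.
func drainBatch(ch chan int64, maxBytes int64, maxCount int) (batch []int64, open bool) {
	var total int64
	for {
		sz, ok := <-ch
		if !ok {
			return batch, false // channel closed, worker should exit
		}
		batch = append(batch, sz)
		total += sz
		if total >= maxBytes || len(batch) >= maxCount || len(ch) == 0 {
			return batch, true
		}
	}
}

func main() {
	ch := make(chan int64, 8)
	for i := 0; i < 5; i++ {
		ch <- 1024
	}
	batch, _ := drainBatch(ch, 4*1024*1024, 128)
	fmt.Println(len(batch)) // 5: drained until the channel emptied
}

The len(ch) == 0 escape keeps latency low under light load, while the caps bound how much unsynced data a single fsync failure can invalidate.
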
+ if te := v.DataBackend.Truncate(end); te != nil { + glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te) + } + for i := 0; i < len(currentRequests); i++ { + if currentRequests[i].IsSucceed() { + currentRequests[i].UpdateResult(0, 0, false, err) + } + } + } + + for i := 0; i < len(currentRequests); i++ { + currentRequests[i].Submit() + } + v.dataFileAccessLock.Unlock() + } + }() +} + +func (v *Volume) WriteNeedleBlob(needleId NeedleId, needleBlob []byte, size Size) error { + + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() + + if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(len(needleBlob)) { + return fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize()) + } + + appendAtNs := uint64(time.Now().UnixNano()) + offset, err := needle.WriteNeedleBlob(v.DataBackend, needleBlob, size, appendAtNs, v.Version()) + + v.checkReadWriteError(err) + if err != nil { + return err + } + v.lastAppendAtNs = appendAtNs + + // add to needle map + if err = v.nm.Put(needleId, ToOffset(int64(offset)), size); err != nil { + glog.V(4).Infof("failed to put in needle map %d: %v", needleId, err) + } + + return err +} diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index e5dc48652..39c24ab04 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -24,6 +24,7 @@ func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.Vol Ttl: option.Ttl.String(), Preallocate: option.Prealloacte, MemoryMapMaxSizeMb: option.MemoryMapMaxSizeMb, + DiskType: string(option.DiskType), }) return deleteErr }) diff --git a/weed/topology/collection.go b/weed/topology/collection.go index 7a611d904..a14b68851 100644 --- a/weed/topology/collection.go +++ b/weed/topology/collection.go @@ -2,6 +2,7 @@ package topology import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" @@ -11,11 +12,16 @@ import ( type Collection struct { Name string volumeSizeLimit uint64 + replicationAsMin bool storageType2VolumeLayout *util.ConcurrentReadMap } -func NewCollection(name string, volumeSizeLimit uint64) *Collection { - c := &Collection{Name: name, volumeSizeLimit: volumeSizeLimit} +func NewCollection(name string, volumeSizeLimit uint64, replicationAsMin bool) *Collection { + c := &Collection{ + Name: name, + volumeSizeLimit: volumeSizeLimit, + replicationAsMin: replicationAsMin, + } c.storageType2VolumeLayout = util.NewConcurrentReadMap() return c } @@ -24,17 +30,31 @@ func (c *Collection) String() string { return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout) } -func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { +func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout { keyString := rp.String() if ttl != nil { keyString += ttl.String() } + if diskType != types.HardDriveType { + keyString += string(diskType) + } vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} { - return NewVolumeLayout(rp, ttl, c.volumeSizeLimit) + return NewVolumeLayout(rp, ttl, diskType, c.volumeSizeLimit, c.replicationAsMin) }) return vl.(*VolumeLayout) } +func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl 
*needle.TTL, diskType types.DiskType) { + keyString := rp.String() + if ttl != nil { + keyString += ttl.String() + } + if diskType != types.HardDriveType { + keyString += string(diskType) + } + c.storageType2VolumeLayout.Delete(keyString) +} + func (c *Collection) Lookup(vid needle.VolumeId) []*DataNode { for _, vl := range c.storageType2VolumeLayout.Items() { if vl != nil { diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go index dc3accb71..60d91ba6d 100644 --- a/weed/topology/data_center.go +++ b/weed/topology/data_center.go @@ -1,6 +1,8 @@ package topology -import "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +import ( + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) type DataCenter struct { NodeImpl @@ -10,6 +12,7 @@ func NewDataCenter(id string) *DataCenter { dc := &DataCenter{} dc.id = NodeId(id) dc.nodeType = "DataCenter" + dc.diskUsages = newDiskUsages() dc.children = make(map[NodeId]Node) dc.NodeImpl.value = dc return dc @@ -30,8 +33,6 @@ func (dc *DataCenter) GetOrCreateRack(rackName string) *Rack { func (dc *DataCenter) ToMap() interface{} { m := make(map[string]interface{}) m["Id"] = dc.Id() - m["Max"] = dc.GetMaxVolumeCount() - m["Free"] = dc.FreeSpace() var racks []interface{} for _, c := range dc.Children() { rack := c.(*Rack) @@ -43,12 +44,8 @@ func (dc *DataCenter) ToMap() interface{} { func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo { m := &master_pb.DataCenterInfo{ - Id: string(dc.Id()), - VolumeCount: uint64(dc.GetVolumeCount()), - MaxVolumeCount: uint64(dc.GetMaxVolumeCount()), - FreeVolumeCount: uint64(dc.FreeSpace()), - ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()), - RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()), + Id: string(dc.Id()), + DiskInfos: dc.diskUsages.ToDiskInfo(), } for _, c := range dc.Children() { rack := c.(*Rack) diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go index 617341e54..69f739dd5 100644 --- a/weed/topology/data_node.go +++ b/weed/topology/data_node.go @@ -2,12 +2,11 @@ package topology import ( "fmt" - "strconv" - "sync" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" + "strconv" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" @@ -15,122 +14,161 @@ import ( type DataNode struct { NodeImpl - volumes map[needle.VolumeId]storage.VolumeInfo - Ip string - Port int - PublicUrl string - LastSeen int64 // unix time in seconds - ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo - ecShardsLock sync.RWMutex + Ip string + Port int + PublicUrl string + LastSeen int64 // unix time in seconds } func NewDataNode(id string) *DataNode { - s := &DataNode{} - s.id = NodeId(id) - s.nodeType = "DataNode" - s.volumes = make(map[needle.VolumeId]storage.VolumeInfo) - s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo) - s.NodeImpl.value = s - return s + dn := &DataNode{} + dn.id = NodeId(id) + dn.nodeType = "DataNode" + dn.diskUsages = newDiskUsages() + dn.children = make(map[NodeId]Node) + dn.NodeImpl.value = dn + return dn } func (dn *DataNode) String() string { dn.RLock() defer dn.RUnlock() - return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl) + return fmt.Sprintf("Node:%s, Ip:%s, Port:%d, PublicUrl:%s", 
dn.NodeImpl.String(), dn.Ip, dn.Port, dn.PublicUrl) } -func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew bool) { +func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) { dn.Lock() defer dn.Unlock() - if oldV, ok := dn.volumes[v.Id]; !ok { - dn.volumes[v.Id] = v - dn.UpAdjustVolumeCountDelta(1) - if v.IsRemote() { - dn.UpAdjustRemoteVolumeCountDelta(1) - } - if !v.ReadOnly { - dn.UpAdjustActiveVolumeCountDelta(1) - } - dn.UpAdjustMaxVolumeId(v.Id) - isNew = true - } else { - if oldV.IsRemote() != v.IsRemote() { - if v.IsRemote() { - dn.UpAdjustRemoteVolumeCountDelta(1) - } - if oldV.IsRemote() { - dn.UpAdjustRemoteVolumeCountDelta(-1) - } - } - dn.volumes[v.Id] = v + return dn.doAddOrUpdateVolume(v) +} + +func (dn *DataNode) getOrCreateDisk(diskType string) *Disk { + c, found := dn.children[NodeId(diskType)] + if !found { + c = NewDisk(diskType) + dn.doLinkChildNode(c) } - return + disk := c.(*Disk) + return disk +} + +func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) { + disk := dn.getOrCreateDisk(v.DiskType) + return disk.AddOrUpdateVolume(v) } -func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes []storage.VolumeInfo) { +// UpdateVolumes detects new/deleted/changed volumes on a volume server +// used in master to notify master clients of these changes. +func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changeRO []storage.VolumeInfo) { + actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo) for _, v := range actualVolumes { actualVolumeMap[v.Id] = v } + dn.Lock() - for vid, v := range dn.volumes { + defer dn.Unlock() + + existingVolumes := dn.getVolumes() + + for _, v := range existingVolumes { + vid := v.Id if _, ok := actualVolumeMap[vid]; !ok { glog.V(0).Infoln("Deleting volume id:", vid) - delete(dn.volumes, vid) + disk := dn.getOrCreateDisk(v.DiskType) + delete(disk.volumes, vid) deletedVolumes = append(deletedVolumes, v) - dn.UpAdjustVolumeCountDelta(-1) + + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType)) + deltaDiskUsage.volumeCount = -1 if v.IsRemote() { - dn.UpAdjustRemoteVolumeCountDelta(-1) + deltaDiskUsage.remoteVolumeCount = -1 } if !v.ReadOnly { - dn.UpAdjustActiveVolumeCountDelta(-1) + deltaDiskUsage.activeVolumeCount = -1 } + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) } } - dn.Unlock() for _, v := range actualVolumes { - isNew := dn.AddOrUpdateVolume(v) + isNew, isChangedRO := dn.doAddOrUpdateVolume(v) if isNew { newVolumes = append(newVolumes, v) } + if isChangedRO { + changeRO = append(changeRO, v) + } } return } -func (dn *DataNode) DeltaUpdateVolumes(newlVolumes, deletedVolumes []storage.VolumeInfo) { +func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.VolumeInfo) { dn.Lock() + defer dn.Unlock() + for _, v := range deletedVolumes { - delete(dn.volumes, v.Id) - dn.UpAdjustVolumeCountDelta(-1) + disk := dn.getOrCreateDisk(v.DiskType) + delete(disk.volumes, v.Id) + + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType)) + deltaDiskUsage.volumeCount = -1 if v.IsRemote() { - dn.UpAdjustRemoteVolumeCountDelta(-1) + deltaDiskUsage.remoteVolumeCount = -1 } if !v.ReadOnly { - dn.UpAdjustActiveVolumeCountDelta(-1) + deltaDiskUsage.activeVolumeCount = -1 } + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) } - dn.Unlock() - for _, v := 
range newlVolumes { - dn.AddOrUpdateVolume(v) + for _, v := range newVolumes { + dn.doAddOrUpdateVolume(v) } return } +func (dn *DataNode) AdjustMaxVolumeCounts(maxVolumeCounts map[string]uint32) { + deltaDiskUsages := newDiskUsages() + for diskType, maxVolumeCount := range maxVolumeCounts { + if maxVolumeCount == 0 { + // the volume server may have set the max to zero + continue + } + dt := types.ToDiskType(diskType) + currentDiskUsage := dn.diskUsages.getOrCreateDisk(dt) + if currentDiskUsage.maxVolumeCount == int64(maxVolumeCount) { + continue + } + disk := dn.getOrCreateDisk(dt.String()) + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(dt) + deltaDiskUsage.maxVolumeCount = int64(maxVolumeCount) - currentDiskUsage.maxVolumeCount + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) + } +} + func (dn *DataNode) GetVolumes() (ret []storage.VolumeInfo) { dn.RLock() - for _, v := range dn.volumes { - ret = append(ret, v) + for _, c := range dn.children { + disk := c.(*Disk) + ret = append(ret, disk.GetVolumes()...) } dn.RUnlock() return ret } -func (dn *DataNode) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) { +func (dn *DataNode) GetVolumesById(id needle.VolumeId) (vInfo storage.VolumeInfo, err error) { dn.RLock() defer dn.RUnlock() - vInfo, ok := dn.volumes[id] - if ok { + found := false + for _, c := range dn.children { + disk := c.(*Disk) + vInfo, found = disk.volumes[id] + if found { + break + } + } + if found { return vInfo, nil } else { return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found") @@ -138,7 +176,10 @@ func (dn *DataNode) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, erro } func (dn *DataNode) GetDataCenter() *DataCenter { - return dn.Parent().Parent().(*NodeImpl).value.(*DataCenter) + rack := dn.Parent() + dcNode := rack.Parent() + dcValue := dcNode.GetValue() + return dcValue.(*DataCenter) } func (dn *DataNode) GetRack() *Rack { @@ -165,28 +206,61 @@ func (dn *DataNode) Url() string { func (dn *DataNode) ToMap() interface{} { ret := make(map[string]interface{}) ret["Url"] = dn.Url() - ret["Volumes"] = dn.GetVolumeCount() - ret["EcShards"] = dn.GetEcShardCount() - ret["Max"] = dn.GetMaxVolumeCount() - ret["Free"] = dn.FreeSpace() ret["PublicUrl"] = dn.PublicUrl + + // aggregated volume info + var volumeCount, ecShardCount, maxVolumeCount int64 + var volumeIds string + for _, diskUsage := range dn.diskUsages.usages { + volumeCount += diskUsage.volumeCount + ecShardCount += diskUsage.ecShardCount + maxVolumeCount += diskUsage.maxVolumeCount + } + + for _, disk := range dn.Children() { + d := disk.(*Disk) + volumeIds += " " + d.GetVolumeIds() + } + + ret["Volumes"] = volumeCount + ret["EcShards"] = ecShardCount + ret["Max"] = maxVolumeCount + ret["VolumeIds"] = volumeIds + return ret } func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo { m := &master_pb.DataNodeInfo{ - Id: string(dn.Id()), - VolumeCount: uint64(dn.GetVolumeCount()), - MaxVolumeCount: uint64(dn.GetMaxVolumeCount()), - FreeVolumeCount: uint64(dn.FreeSpace()), - ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()), - RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()), - } - for _, v := range dn.GetVolumes() { - m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage()) + Id: string(dn.Id()), + DiskInfos: make(map[string]*master_pb.DiskInfo), } - for _, ecv := range dn.GetEcShards() { - m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage()) + for _, c := range dn.Children() { + disk := c.(*Disk) + m.DiskInfos[string(disk.Id())] = 
disk.ToDiskInfo() } return m } + +// GetVolumeIds returns the human readable volume ids limited to count of max 100. +func (dn *DataNode) GetVolumeIds() string { + dn.RLock() + defer dn.RUnlock() + existingVolumes := dn.getVolumes() + ids := make([]int, 0, len(existingVolumes)) + + for k := range existingVolumes { + ids = append(ids, int(k)) + } + + return util.HumanReadableIntsMax(100, ids...) +} + +func (dn *DataNode) getVolumes() []storage.VolumeInfo { + var existingVolumes []storage.VolumeInfo + for _, c := range dn.children { + disk := c.(*Disk) + existingVolumes = append(existingVolumes, disk.GetVolumes()...) + } + return existingVolumes +} diff --git a/weed/topology/data_node_ec.go b/weed/topology/data_node_ec.go index 75c8784fe..330b16b24 100644 --- a/weed/topology/data_node_ec.go +++ b/weed/topology/data_node_ec.go @@ -3,12 +3,14 @@ package topology import ( "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" ) func (dn *DataNode) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) { dn.RLock() - for _, ecVolumeInfo := range dn.ecShards { - ret = append(ret, ecVolumeInfo) + for _, c := range dn.children { + disk := c.(*Disk) + ret = append(ret, disk.GetEcShards()...) } dn.RUnlock() return ret @@ -21,10 +23,17 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) actualEcShardMap[ecShards.VolumeId] = ecShards } - // found out the newShards and deletedShards + existingEcShards := dn.GetEcShards() + + // find out the newShards and deletedShards var newShardCount, deletedShardCount int - dn.ecShardsLock.RLock() - for vid, ecShards := range dn.ecShards { + for _, ecShards := range existingEcShards { + + disk := dn.getOrCreateDisk(ecShards.DiskType) + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(ecShards.DiskType)) + + vid := ecShards.VolumeId if actualEcShards, ok := actualEcShardMap[vid]; !ok { // dn registered ec shards not found in the new set of ec shards deletedShards = append(deletedShards, ecShards) @@ -42,26 +51,60 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) deletedShardCount += d.ShardIdCount() } } + + deltaDiskUsage.ecShardCount = int64(newShardCount - deletedShardCount) + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) + } + for _, ecShards := range actualShards { - if _, found := dn.ecShards[ecShards.VolumeId]; !found { - newShards = append(newShards, ecShards) - newShardCount += ecShards.ShardIdCount() + if dn.hasEcShards(ecShards.VolumeId) { + continue } + + newShards = append(newShards, ecShards) + + disk := dn.getOrCreateDisk(ecShards.DiskType) + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(ecShards.DiskType)) + deltaDiskUsage.ecShardCount = int64(ecShards.ShardIdCount()) + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) } - dn.ecShardsLock.RUnlock() if len(newShards) > 0 || len(deletedShards) > 0 { // if changed, set to the new ec shard map - dn.ecShardsLock.Lock() - dn.ecShards = actualEcShardMap - dn.UpAdjustEcShardCountDelta(int64(newShardCount - deletedShardCount)) - dn.ecShardsLock.Unlock() + dn.doUpdateEcShards(actualShards) } return } +func (dn *DataNode) hasEcShards(volumeId needle.VolumeId) (found bool) { + dn.RLock() + defer dn.RUnlock() + for _, c := range dn.children { + disk := c.(*Disk) + _, found = disk.ecShards[volumeId] + if found { + return + } + } + return +} + 
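+// doUpdateEcShards replaces the per-disk ec shard maps with the shard set
+// actually reported by the volume server, regrouping each shard under the
+// disk type it lives on.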
+func (dn *DataNode) doUpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) { + dn.Lock() + for _, c := range dn.children { + disk := c.(*Disk) + disk.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo) + } + for _, shard := range actualShards { + disk := dn.getOrCreateDisk(shard.DiskType) + disk.ecShards[shard.VolumeId] = shard + } + dn.Unlock() +} + func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_coding.EcVolumeInfo) { for _, newShard := range newShards { @@ -75,61 +118,25 @@ func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_codi } func (dn *DataNode) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) { - dn.ecShardsLock.Lock() - defer dn.ecShardsLock.Unlock() - - delta := 0 - if existing, ok := dn.ecShards[s.VolumeId]; !ok { - dn.ecShards[s.VolumeId] = s - delta = s.ShardBits.ShardIdCount() - } else { - oldCount := existing.ShardBits.ShardIdCount() - existing.ShardBits = existing.ShardBits.Plus(s.ShardBits) - delta = existing.ShardBits.ShardIdCount() - oldCount - } - - dn.UpAdjustEcShardCountDelta(int64(delta)) - + disk := dn.getOrCreateDisk(s.DiskType) + disk.AddOrUpdateEcShard(s) } func (dn *DataNode) DeleteEcShard(s *erasure_coding.EcVolumeInfo) { - dn.ecShardsLock.Lock() - defer dn.ecShardsLock.Unlock() - - if existing, ok := dn.ecShards[s.VolumeId]; ok { - oldCount := existing.ShardBits.ShardIdCount() - existing.ShardBits = existing.ShardBits.Minus(s.ShardBits) - delta := existing.ShardBits.ShardIdCount() - oldCount - dn.UpAdjustEcShardCountDelta(int64(delta)) - if existing.ShardBits.ShardIdCount() == 0 { - delete(dn.ecShards, s.VolumeId) - } - } - + disk := dn.getOrCreateDisk(s.DiskType) + disk.DeleteEcShard(s) } -func (dn *DataNode) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) { +func (dn *DataNode) HasVolumesById(volumeId needle.VolumeId) (hasVolumeId bool) { - // check whether normal volumes has this volume id dn.RLock() - _, ok := dn.volumes[id] - if ok { - hasVolumeId = true - } - dn.RUnlock() - - if hasVolumeId { - return - } - - // check whether ec shards has this volume id - dn.ecShardsLock.RLock() - _, ok = dn.ecShards[id] - if ok { - hasVolumeId = true + defer dn.RUnlock() + for _, c := range dn.children { + disk := c.(*Disk) + if disk.HasVolumesById(volumeId) { + return true + } } - dn.ecShardsLock.RUnlock() - - return + return false } diff --git a/weed/topology/disk.go b/weed/topology/disk.go new file mode 100644 index 000000000..a085f8dff --- /dev/null +++ b/weed/topology/disk.go @@ -0,0 +1,270 @@ +package topology + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" + "sync" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + + "github.com/chrislusf/seaweedfs/weed/storage" +) + +type Disk struct { + NodeImpl + volumes map[needle.VolumeId]storage.VolumeInfo + ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo + ecShardsLock sync.RWMutex +} + +func NewDisk(diskType string) *Disk { + s := &Disk{} + s.id = NodeId(diskType) + s.nodeType = "Disk" + s.diskUsages = newDiskUsages() + s.volumes = make(map[needle.VolumeId]storage.VolumeInfo, 2) + s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo, 2) + s.NodeImpl.value = s + return s +} + +type DiskUsages struct { + sync.RWMutex + usages map[types.DiskType]*DiskUsageCounts +} + +func newDiskUsages() *DiskUsages { + return &DiskUsages{ + 
usages: make(map[types.DiskType]*DiskUsageCounts), + } +} + +func (d *DiskUsages) negative() *DiskUsages { + d.RLock() + defer d.RUnlock() + t := newDiskUsages() + for diskType, b := range d.usages { + a := t.getOrCreateDisk(diskType) + a.volumeCount = -b.volumeCount + a.remoteVolumeCount = -b.remoteVolumeCount + a.activeVolumeCount = -b.activeVolumeCount + a.ecShardCount = -b.ecShardCount + a.maxVolumeCount = -b.maxVolumeCount + + } + return t +} + +func (d *DiskUsages) ToDiskInfo() map[string]*master_pb.DiskInfo { + ret := make(map[string]*master_pb.DiskInfo) + for diskType, diskUsageCounts := range d.usages { + m := &master_pb.DiskInfo{ + VolumeCount: uint64(diskUsageCounts.volumeCount), + MaxVolumeCount: uint64(diskUsageCounts.maxVolumeCount), + FreeVolumeCount: uint64(diskUsageCounts.maxVolumeCount - diskUsageCounts.volumeCount), + ActiveVolumeCount: uint64(diskUsageCounts.activeVolumeCount), + RemoteVolumeCount: uint64(diskUsageCounts.remoteVolumeCount), + } + ret[string(diskType)] = m + } + return ret +} + +func (d *DiskUsages) FreeSpace() (freeSpace int64) { + d.RLock() + defer d.RUnlock() + for _, diskUsage := range d.usages { + freeSpace += diskUsage.FreeSpace() + } + return +} + +func (d *DiskUsages) GetMaxVolumeCount() (maxVolumeCount int64) { + d.RLock() + defer d.RUnlock() + for _, diskUsage := range d.usages { + maxVolumeCount += diskUsage.maxVolumeCount + } + return +} + +type DiskUsageCounts struct { + volumeCount int64 + remoteVolumeCount int64 + activeVolumeCount int64 + ecShardCount int64 + maxVolumeCount int64 +} + +func (a *DiskUsageCounts) addDiskUsageCounts(b *DiskUsageCounts) { + a.volumeCount += b.volumeCount + a.remoteVolumeCount += b.remoteVolumeCount + a.activeVolumeCount += b.activeVolumeCount + a.ecShardCount += b.ecShardCount + a.maxVolumeCount += b.maxVolumeCount +} + +func (a *DiskUsageCounts) FreeSpace() int64 { + freeVolumeSlotCount := a.maxVolumeCount + a.remoteVolumeCount - a.volumeCount + if a.ecShardCount > 0 { + freeVolumeSlotCount = freeVolumeSlotCount - a.ecShardCount/erasure_coding.DataShardsCount - 1 + } + return freeVolumeSlotCount +} + +func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts { + return &DiskUsageCounts{ + volumeCount: a.volumeCount - b.volumeCount, + remoteVolumeCount: a.remoteVolumeCount - b.remoteVolumeCount, + activeVolumeCount: a.activeVolumeCount - b.activeVolumeCount, + ecShardCount: a.ecShardCount - b.ecShardCount, + maxVolumeCount: a.maxVolumeCount - b.maxVolumeCount, + } +} + +func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts { + du.Lock() + defer du.Unlock() + t, found := du.usages[diskType] + if found { + return t + } + t = &DiskUsageCounts{} + du.usages[diskType] = t + return t +} + +func (d *Disk) String() string { + d.RLock() + defer d.RUnlock() + return fmt.Sprintf("Disk:%s, volumes:%v, ecShards:%v", d.NodeImpl.String(), d.volumes, d.ecShards) +} + +func (d *Disk) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) { + d.Lock() + defer d.Unlock() + return d.doAddOrUpdateVolume(v) +} + +func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) { + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType)) + if oldV, ok := d.volumes[v.Id]; !ok { + d.volumes[v.Id] = v + deltaDiskUsage.volumeCount = 1 + if v.IsRemote() { + deltaDiskUsage.remoteVolumeCount = 1 + } + if !v.ReadOnly { + deltaDiskUsage.activeVolumeCount = 1 + } + d.UpAdjustMaxVolumeId(v.Id) + 
d.UpAdjustDiskUsageDelta(deltaDiskUsages) + isNew = true + } else { + if oldV.IsRemote() != v.IsRemote() { + if v.IsRemote() { + deltaDiskUsage.remoteVolumeCount = 1 + } + if oldV.IsRemote() { + deltaDiskUsage.remoteVolumeCount = -1 + } + d.UpAdjustDiskUsageDelta(deltaDiskUsages) + } + isChangedRO = d.volumes[v.Id].ReadOnly != v.ReadOnly + d.volumes[v.Id] = v + } + return +} + +func (d *Disk) GetVolumes() (ret []storage.VolumeInfo) { + d.RLock() + for _, v := range d.volumes { + ret = append(ret, v) + } + d.RUnlock() + return ret +} + +func (d *Disk) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) { + d.RLock() + defer d.RUnlock() + vInfo, ok := d.volumes[id] + if ok { + return vInfo, nil + } else { + return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found") + } +} + +func (d *Disk) GetDataCenter() *DataCenter { + dn := d.Parent() + rack := dn.Parent() + dcNode := rack.Parent() + dcValue := dcNode.GetValue() + return dcValue.(*DataCenter) +} + +func (d *Disk) GetRack() *Rack { + return d.Parent().Parent().(*NodeImpl).value.(*Rack) +} + +func (d *Disk) GetTopology() *Topology { + p := d.Parent() + for p.Parent() != nil { + p = p.Parent() + } + t := p.(*Topology) + return t +} + +func (d *Disk) ToMap() interface{} { + ret := make(map[string]interface{}) + diskUsage := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id()))) + ret["Volumes"] = diskUsage.volumeCount + ret["VolumeIds"] = d.GetVolumeIds() + ret["EcShards"] = diskUsage.ecShardCount + ret["Max"] = diskUsage.maxVolumeCount + ret["Free"] = d.FreeSpace() + return ret +} + +func (d *Disk) FreeSpace() int64 { + t := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id()))) + return t.FreeSpace() +} + +func (d *Disk) ToDiskInfo() *master_pb.DiskInfo { + diskUsage := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id()))) + m := &master_pb.DiskInfo{ + Type: string(d.Id()), + VolumeCount: uint64(diskUsage.volumeCount), + MaxVolumeCount: uint64(diskUsage.maxVolumeCount), + FreeVolumeCount: uint64(diskUsage.maxVolumeCount - diskUsage.volumeCount), + ActiveVolumeCount: uint64(diskUsage.activeVolumeCount), + RemoteVolumeCount: uint64(diskUsage.remoteVolumeCount), + } + for _, v := range d.GetVolumes() { + m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage()) + } + for _, ecv := range d.GetEcShards() { + m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage()) + } + return m +} + +// GetVolumeIds returns the human readable volume ids limited to count of max 100. +func (d *Disk) GetVolumeIds() string { + d.RLock() + defer d.RUnlock() + ids := make([]int, 0, len(d.volumes)) + + for k := range d.volumes { + ids = append(ids, int(k)) + } + + return util.HumanReadableIntsMax(100, ids...) 
+} diff --git a/weed/topology/disk_ec.go b/weed/topology/disk_ec.go new file mode 100644 index 000000000..74a06b47f --- /dev/null +++ b/weed/topology/disk_ec.go @@ -0,0 +1,84 @@ +package topology + +import ( + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func (d *Disk) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) { + d.RLock() + for _, ecVolumeInfo := range d.ecShards { + ret = append(ret, ecVolumeInfo) + } + d.RUnlock() + return ret +} + +func (d *Disk) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) { + d.ecShardsLock.Lock() + defer d.ecShardsLock.Unlock() + + delta := 0 + if existing, ok := d.ecShards[s.VolumeId]; !ok { + d.ecShards[s.VolumeId] = s + delta = s.ShardBits.ShardIdCount() + } else { + oldCount := existing.ShardBits.ShardIdCount() + existing.ShardBits = existing.ShardBits.Plus(s.ShardBits) + delta = existing.ShardBits.ShardIdCount() - oldCount + } + + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id()))) + deltaDiskUsage.ecShardCount = int64(delta) + d.UpAdjustDiskUsageDelta(deltaDiskUsages) + +} + +func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) { + d.ecShardsLock.Lock() + defer d.ecShardsLock.Unlock() + + if existing, ok := d.ecShards[s.VolumeId]; ok { + oldCount := existing.ShardBits.ShardIdCount() + existing.ShardBits = existing.ShardBits.Minus(s.ShardBits) + delta := existing.ShardBits.ShardIdCount() - oldCount + + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id()))) + deltaDiskUsage.ecShardCount = int64(delta) + d.UpAdjustDiskUsageDelta(deltaDiskUsages) + + if existing.ShardBits.ShardIdCount() == 0 { + delete(d.ecShards, s.VolumeId) + } + } + +} + +func (d *Disk) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) { + + // check whether normal volumes has this volume id + d.RLock() + _, ok := d.volumes[id] + if ok { + hasVolumeId = true + } + d.RUnlock() + + if hasVolumeId { + return + } + + // check whether ec shards has this volume id + d.ecShardsLock.RLock() + _, ok = d.ecShards[id] + if ok { + hasVolumeId = true + } + d.ecShardsLock.RUnlock() + + return + +} diff --git a/weed/topology/node.go b/weed/topology/node.go index 572a89d4d..95d63972e 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -2,34 +2,25 @@ package topology import ( "errors" - "math/rand" - "strings" - "sync" - "sync/atomic" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "math/rand" + "strings" + "sync" ) type NodeId string type Node interface { Id() NodeId String() string - FreeSpace() int64 - ReserveOneVolume(r int64) (*DataNode, error) - UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) - UpAdjustVolumeCountDelta(volumeCountDelta int64) - UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) - UpAdjustEcShardCountDelta(ecShardCountDelta int64) - UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) + AvailableSpaceFor(option *VolumeGrowOption) int64 + ReserveOneVolume(r int64, option *VolumeGrowOption) (*DataNode, error) + UpAdjustDiskUsageDelta(deltaDiskUsages *DiskUsages) UpAdjustMaxVolumeId(vid needle.VolumeId) + GetDiskUsages() *DiskUsages - GetVolumeCount() int64 - GetEcShardCount() int64 - 
GetActiveVolumeCount() int64
-	GetRemoteVolumeCount() int64
-	GetMaxVolumeCount() int64
 	GetMaxVolumeId() needle.VolumeId
 	SetParent(Node)
 	LinkChildNode(node Node)
@@ -45,73 +36,81 @@ type Node interface {
 	GetValue() interface{} //get reference to the topology,dc,rack,datanode
 }
 type NodeImpl struct {
-	volumeCount int64
-	remoteVolumeCount int64
-	activeVolumeCount int64
-	ecShardCount int64
-	maxVolumeCount int64
-	id NodeId
-	parent Node
-	sync.RWMutex // lock children
-	children map[NodeId]Node
-	maxVolumeId needle.VolumeId
+	diskUsages *DiskUsages
+	id NodeId
+	parent Node
+	sync.RWMutex // lock children
+	children map[NodeId]Node
+	maxVolumeId needle.VolumeId
 	//for rack, data center, topology
 	nodeType string
 	value    interface{}
 }

+func (n *NodeImpl) GetDiskUsages() *DiskUsages {
+	return n.diskUsages
+}
+
 // the first node must satisfy filterFirstNodeFn(), the rest nodes must have one free slot
-func (n *NodeImpl) RandomlyPickNodes(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
-	candidates := make([]Node, 0, len(n.children))
+func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, option *VolumeGrowOption, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
+	var totalWeights int64
 	var errs []string
 	n.RLock()
+	candidates := make([]Node, 0, len(n.children))
+	candidatesWeights := make([]int64, 0, len(n.children))
+	// pick nodes that have enough free volume slots as candidates, and use the free slot count as the node weight
 	for _, node := range n.children {
-		if err := filterFirstNodeFn(node); err == nil {
-			candidates = append(candidates, node)
-		} else {
-			errs = append(errs, string(node.Id())+":"+err.Error())
+		if node.AvailableSpaceFor(option) <= 0 {
+			continue
 		}
+		totalWeights += node.AvailableSpaceFor(option)
+		candidates = append(candidates, node)
+		candidatesWeights = append(candidatesWeights, node.AvailableSpaceFor(option))
 	}
 	n.RUnlock()
-	if len(candidates) == 0 {
-		return nil, nil, errors.New("No matching data node found!\n" + strings.Join(errs, "\n"))
+	if len(candidates) < numberOfNodes {
+		glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates")
+		return nil, nil, errors.New("Not enough data nodes found!")
 	}
-	firstNode = candidates[rand.Intn(len(candidates))]
-	glog.V(2).Infoln(n.Id(), "picked main node:", firstNode.Id())
-	restNodes = make([]Node, numberOfNodes-1)
-	candidates = candidates[:0]
-	n.RLock()
-	for _, node := range n.children {
-		if node.Id() == firstNode.Id() {
-			continue
-		}
-		if node.FreeSpace() <= 0 {
-			continue
+	// pick nodes randomly by weight; a node picked earlier effectively ends up with a higher final weight
+	sortedCandidates := make([]Node, 0, len(candidates))
+	for i := 0; i < len(candidates); i++ {
+		weightsInterval := rand.Int63n(totalWeights)
+		lastWeights := int64(0)
+		for k, weights := range candidatesWeights {
+			if (weightsInterval >= lastWeights) && (weightsInterval < lastWeights+weights) {
+				sortedCandidates = append(sortedCandidates, candidates[k])
+				candidatesWeights[k] = 0
+				totalWeights -= weights
+				break
+			}
+			lastWeights += weights
 		}
-		glog.V(2).Infoln("select rest node candidate:", node.Id())
-		candidates = append(candidates, node)
 	}
-	n.RUnlock()
-	glog.V(2).Infoln(n.Id(), "picking", numberOfNodes-1, "from rest", len(candidates), "node candidates")
-	ret := len(restNodes) == 0
-	for k, node := range candidates {
-		if k < len(restNodes) {
-			restNodes[k] = node
-			if k == len(restNodes)-1 {
-				ret = true
+
+	restNodes = make([]Node, 0, numberOfNodes-1)
+	ret := false
+	n.RLock()
+	for k, node := range sortedCandidates {
+		if err := filterFirstNodeFn(node); err == nil {
+			firstNode = node
+			if k >= numberOfNodes-1 {
+				restNodes = sortedCandidates[:numberOfNodes-1]
+			} else {
+				restNodes = append(restNodes, sortedCandidates[:k]...)
+				restNodes = append(restNodes, sortedCandidates[k+1:numberOfNodes]...)
 			}
+			ret = true
+			break
 		} else {
-			r := rand.Intn(k + 1)
-			if r < len(restNodes) {
-				restNodes[r] = node
-			}
+			errs = append(errs, string(node.Id())+":"+err.Error())
 		}
 	}
+	n.RUnlock()
 	if !ret {
-		glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes-1, "from rest", len(candidates), "node candidates")
-		err = errors.New("No enough data node found!")
+		return nil, nil, errors.New("No matching data node found! 
\n" + strings.Join(errs, "\n")) } return } @@ -134,10 +133,14 @@ func (n *NodeImpl) String() string { func (n *NodeImpl) Id() NodeId { return n.id } -func (n *NodeImpl) FreeSpace() int64 { - freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount - if n.ecShardCount > 0 { - freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1 +func (n *NodeImpl) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts { + return n.diskUsages.getOrCreateDisk(diskType) +} +func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 { + t := n.getOrCreateDisk(option.DiskType) + freeVolumeSlotCount := t.maxVolumeCount + t.remoteVolumeCount - t.volumeCount + if t.ecShardCount > 0 { + freeVolumeSlotCount = freeVolumeSlotCount - t.ecShardCount/erasure_coding.DataShardsCount - 1 } return freeVolumeSlotCount } @@ -158,11 +161,11 @@ func (n *NodeImpl) Parent() Node { func (n *NodeImpl) GetValue() interface{} { return n.value } -func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error) { +func (n *NodeImpl) ReserveOneVolume(r int64, option *VolumeGrowOption) (assignedNode *DataNode, err error) { n.RLock() defer n.RUnlock() for _, node := range n.children { - freeSpace := node.FreeSpace() + freeSpace := node.AvailableSpaceFor(option) // fmt.Println("r =", r, ", node =", node, ", freeSpace =", freeSpace) if freeSpace <= 0 { continue @@ -170,11 +173,11 @@ func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error) if r >= freeSpace { r -= freeSpace } else { - if node.IsDataNode() && node.FreeSpace() > 0 { + if node.IsDataNode() && node.AvailableSpaceFor(option) > 0 { // fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace()) return node.(*DataNode), nil } - assignedNode, err = node.ReserveOneVolume(r) + assignedNode, err = node.ReserveOneVolume(r, option) if err == nil { return } @@ -183,34 +186,13 @@ func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error) return nil, errors.New("No free volume slot found!") } -func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative - atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta) - if n.parent != nil { - n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta) - } -} -func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative - atomic.AddInt64(&n.volumeCount, volumeCountDelta) - if n.parent != nil { - n.parent.UpAdjustVolumeCountDelta(volumeCountDelta) +func (n *NodeImpl) UpAdjustDiskUsageDelta(deltaDiskUsages *DiskUsages) { //can be negative + for diskType, diskUsage := range deltaDiskUsages.usages { + existingDisk := n.getOrCreateDisk(diskType) + existingDisk.addDiskUsageCounts(diskUsage) } -} -func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative - atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta) if n.parent != nil { - n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta) - } -} -func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative - atomic.AddInt64(&n.ecShardCount, ecShardCountDelta) - if n.parent != nil { - n.parent.UpAdjustEcShardCountDelta(ecShardCountDelta) - } -} -func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative - atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta) - if n.parent != nil { - n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta) + 
n.parent.UpAdjustDiskUsageDelta(deltaDiskUsages) } } func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative @@ -224,33 +206,18 @@ func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative func (n *NodeImpl) GetMaxVolumeId() needle.VolumeId { return n.maxVolumeId } -func (n *NodeImpl) GetVolumeCount() int64 { - return n.volumeCount -} -func (n *NodeImpl) GetEcShardCount() int64 { - return n.ecShardCount -} -func (n *NodeImpl) GetRemoteVolumeCount() int64 { - return n.remoteVolumeCount -} -func (n *NodeImpl) GetActiveVolumeCount() int64 { - return n.activeVolumeCount -} -func (n *NodeImpl) GetMaxVolumeCount() int64 { - return n.maxVolumeCount -} func (n *NodeImpl) LinkChildNode(node Node) { n.Lock() defer n.Unlock() + n.doLinkChildNode(node) +} + +func (n *NodeImpl) doLinkChildNode(node Node) { if n.children[node.Id()] == nil { n.children[node.Id()] = node - n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount()) + n.UpAdjustDiskUsageDelta(node.GetDiskUsages()) n.UpAdjustMaxVolumeId(node.GetMaxVolumeId()) - n.UpAdjustVolumeCountDelta(node.GetVolumeCount()) - n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount()) - n.UpAdjustEcShardCountDelta(node.GetEcShardCount()) - n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount()) node.SetParent(n) glog.V(0).Infoln(n, "adds child", node.Id()) } @@ -263,11 +230,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) { if node != nil { node.SetParent(nil) delete(n.children, node.Id()) - n.UpAdjustVolumeCountDelta(-node.GetVolumeCount()) - n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount()) - n.UpAdjustEcShardCountDelta(-node.GetEcShardCount()) - n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount()) - n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount()) + n.UpAdjustDiskUsageDelta(node.GetDiskUsages().negative()) glog.V(0).Infoln(n, "removes", node.Id()) } } diff --git a/weed/topology/rack.go b/weed/topology/rack.go index 1921c0c05..8eb2a717c 100644 --- a/weed/topology/rack.go +++ b/weed/topology/rack.go @@ -2,6 +2,7 @@ package topology import ( "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/types" "strconv" "time" ) @@ -14,6 +15,7 @@ func NewRack(id string) *Rack { r := &Rack{} r.id = NodeId(id) r.nodeType = "Rack" + r.diskUsages = newDiskUsages() r.children = make(map[NodeId]Node) r.NodeImpl.value = r return r @@ -28,7 +30,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode { } return nil } -func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64) *DataNode { +func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCounts map[string]uint32) *DataNode { for _, c := range r.Children() { dn := c.(*DataNode) if dn.MatchLocation(ip, port) { @@ -40,17 +42,19 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol dn.Ip = ip dn.Port = port dn.PublicUrl = publicUrl - dn.maxVolumeCount = maxVolumeCount dn.LastSeen = time.Now().Unix() r.LinkChildNode(dn) + for diskType, maxVolumeCount := range maxVolumeCounts { + disk := NewDisk(diskType) + disk.diskUsages.getOrCreateDisk(types.ToDiskType(diskType)).maxVolumeCount = int64(maxVolumeCount) + dn.LinkChildNode(disk) + } return dn } func (r *Rack) ToMap() interface{} { m := make(map[string]interface{}) m["Id"] = r.Id() - m["Max"] = r.GetMaxVolumeCount() - m["Free"] = r.FreeSpace() var dns []interface{} for _, c := range r.Children() { dn := c.(*DataNode) @@ -62,12 +66,8 @@ func (r 
*Rack) ToMap() interface{} { func (r *Rack) ToRackInfo() *master_pb.RackInfo { m := &master_pb.RackInfo{ - Id: string(r.Id()), - VolumeCount: uint64(r.GetVolumeCount()), - MaxVolumeCount: uint64(r.GetMaxVolumeCount()), - FreeVolumeCount: uint64(r.FreeSpace()), - ActiveVolumeCount: uint64(r.GetActiveVolumeCount()), - RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()), + Id: string(r.Id()), + DiskInfos: r.diskUsages.ToDiskInfo(), } for _, c := range r.Children() { dn := c.(*DataNode) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index b195b48ed..ea0a8c968 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -1,7 +1,6 @@ package topology import ( - "bytes" "encoding/json" "errors" "fmt" @@ -15,30 +14,39 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) -func ReplicatedWrite(masterNode string, s *storage.Store, - volumeId needle.VolumeId, n *needle.Needle, - r *http.Request) (size uint32, isUnchanged bool, err error) { +func ReplicatedWrite(masterFn operation.GetMasterFn, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request) (isUnchanged bool, err error) { //check JWT jwt := security.GetJwt(r) + // check whether this is a replicated write request var remoteLocations []operation.Location if r.FormValue("type") != "replicate" { - remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode) + // this is the initial request + remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterFn) if err != nil { glog.V(0).Infoln(err) return } } - size, isUnchanged, err = s.WriteVolumeNeedle(volumeId, n) - if err != nil { - err = fmt.Errorf("failed to write to local disk: %v", err) - glog.V(0).Infoln(err) - return + // read fsync value + fsync := false + if r.FormValue("fsync") == "true" { + fsync = true + } + + if s.GetVolume(volumeId) != nil { + isUnchanged, err = s.WriteVolumeNeedle(volumeId, n, fsync) + if err != nil { + err = fmt.Errorf("failed to write to local disk: %v", err) + glog.V(0).Infoln(err) + return + } } if len(remoteLocations) > 0 { //send to other replica locations @@ -72,12 +80,11 @@ func ReplicatedWrite(masterNode string, s *storage.Store, } } - _, err := operation.Upload(u.String(), - string(n.Name), bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), - pairMap, jwt) + // volume server do not know about encryption + // TODO optimize here to compress data only once + _, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsCompressed(), string(n.Mime), pairMap, jwt) return err }); err != nil { - size = 0 err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) glog.V(0).Infoln(err) } @@ -85,16 +92,16 @@ func ReplicatedWrite(masterNode string, s *storage.Store, return } -func ReplicatedDelete(masterNode string, store *storage.Store, +func ReplicatedDelete(masterFn operation.GetMasterFn, store *storage.Store, volumeId needle.VolumeId, n *needle.Needle, - r *http.Request) (size uint32, err error) { + r *http.Request) (size types.Size, err error) { //check JWT jwt := security.GetJwt(r) var remoteLocations []operation.Location if r.FormValue("type") != "replicate" { - remoteLocations, err = getWritableRemoteReplications(store, volumeId, masterNode) + remoteLocations, err = getWritableRemoteReplications(store, 
volumeId, masterFn) if err != nil { glog.V(0).Infoln(err) return @@ -154,25 +161,34 @@ func distributedOperation(locations []operation.Location, store *storage.Store, return ret.Error() } -func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) ( +func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterFn operation.GetMasterFn) ( remoteLocations []operation.Location, err error) { - copyCount := s.GetVolume(volumeId).ReplicaPlacement.GetCopyCount() - if copyCount > 1 { - if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { - if len(lookupResult.Locations) < copyCount { - err = fmt.Errorf("replicating opetations [%d] is less than volume's replication copy count [%d]", - len(lookupResult.Locations), copyCount) - return - } - selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) - for _, location := range lookupResult.Locations { - if location.Url != selfUrl { - remoteLocations = append(remoteLocations, location) - } + + v := s.GetVolume(volumeId) + if v != nil && v.ReplicaPlacement.GetCopyCount() == 1 { + return + } + + // not on local store, or has replications + lookupResult, lookupErr := operation.Lookup(masterFn, volumeId.String()) + if lookupErr == nil { + selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) + for _, location := range lookupResult.Locations { + if location.Url != selfUrl { + remoteLocations = append(remoteLocations, location) } - } else { - err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr) - return + } + } else { + err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr) + return + } + + if v != nil { + // has one local and has remote replications + copyCount := v.ReplicaPlacement.GetCopyCount() + if len(lookupResult.Locations) < copyCount { + err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]", + len(lookupResult.Locations), volumeId, copyCount) } } diff --git a/weed/topology/topology.go b/weed/topology/topology.go index e6cb44727..08ebd24fd 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -3,8 +3,10 @@ package topology import ( "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "math/rand" "sync" + "time" "github.com/chrislusf/raft" @@ -27,7 +29,8 @@ type Topology struct { pulse int64 - volumeSizeLimit uint64 + volumeSizeLimit uint64 + replicationAsMin bool Sequence sequence.Sequencer @@ -38,16 +41,18 @@ type Topology struct { RaftServer raft.Server } -func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int) *Topology { +func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology { t := &Topology{} t.id = NodeId(id) t.nodeType = "Topology" t.NodeImpl.value = t + t.diskUsages = newDiskUsages() t.children = make(map[NodeId]Node) t.collectionMap = util.NewConcurrentReadMap() t.ecShardMap = make(map[needle.VolumeId]*EcShardLocations) t.pulse = int64(pulse) t.volumeSizeLimit = volumeSizeLimit + t.replicationAsMin = replicationAsMin t.Sequence = seq @@ -60,29 +65,32 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls func (t *Topology) IsLeader() bool { if t.RaftServer != nil { - return t.RaftServer.State() == raft.Leader + if t.RaftServer.State() == raft.Leader { + return true + } } return false } func (t *Topology) Leader() (string, error) { l := "" - if t.RaftServer != nil { - l = t.RaftServer.Leader() - } else { - return "", 
errors.New("Raft Server not ready yet!") - } - - if l == "" { - // We are a single node cluster, we are the leader - return t.RaftServer.Name(), errors.New("Raft Server not initialized!") + for count := 0; count < 3; count++ { + if t.RaftServer != nil { + l = t.RaftServer.Leader() + } else { + return "", errors.New("Raft Server not ready yet!") + } + if l != "" { + break + } else { + time.Sleep(time.Duration(5+count) * time.Second) + } } - return l, nil } func (t *Topology) Lookup(collection string, vid needle.VolumeId) (dataNodes []*DataNode) { - //maybe an issue if lots of collections? + // maybe an issue if lots of collections? if collection == "" { for _, c := range t.collectionMap.Items() { if list := c.(*Collection).Lookup(vid); list != nil { @@ -115,12 +123,12 @@ func (t *Topology) NextVolumeId() (needle.VolumeId, error) { } func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool { - vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl) + vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType) return vl.GetActiveVolumeCount(option) > 0 } func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) { - vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option) + vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType).PickForWrite(count, option) if err != nil { return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err) } @@ -131,10 +139,10 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil } -func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { +func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout { return t.collectionMap.Get(collectionName, func() interface{} { - return NewCollection(collectionName, t.volumeSizeLimit) - }).(*Collection).GetOrCreateVolumeLayout(rp, ttl) + return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin) + }).(*Collection).GetOrCreateVolumeLayout(rp, ttl, diskType) } func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) (ret []string) { @@ -152,7 +160,7 @@ func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) t.ecShardMapLock.RUnlock() } - for k, _ := range mapOfCollections { + for k := range mapOfCollections { ret = append(ret, k) } return ret @@ -170,15 +178,30 @@ func (t *Topology) DeleteCollection(collectionName string) { t.collectionMap.Delete(collectionName) } +func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) { + collection, found := t.FindCollection(collectionName) + if !found { + return + } + collection.DeleteVolumeLayout(rp, ttl, diskType) + if len(collection.storageType2VolumeLayout.Items()) == 0 { + t.DeleteCollection(collectionName) + } +} + func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) { - t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl).RegisterVolume(&v, dn) + diskType := 
types.ToDiskType(v.DiskType) + vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType) + vl.RegisterVolume(&v, dn) + vl.EnsureCorrectWritables(&v) } func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) { - glog.Infof("removing volume info:%+v", v) - volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl) + glog.Infof("removing volume info: %+v", v) + diskType := types.ToDiskType(v.DiskType) + volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType) volumeLayout.UnRegisterVolume(&v, dn) if volumeLayout.isEmpty() { - t.DeleteCollection(v.Collection) + t.DeleteLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType) } } @@ -205,13 +228,19 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati } } // find out the delta volumes - newVolumes, deletedVolumes = dn.UpdateVolumes(volumeInfos) + var changedVolumes []storage.VolumeInfo + newVolumes, deletedVolumes, changedVolumes = dn.UpdateVolumes(volumeInfos) for _, v := range newVolumes { t.RegisterVolumeLayout(v, dn) } for _, v := range deletedVolumes { t.UnRegisterVolumeLayout(v, dn) } + for _, v := range changedVolumes { + diskType := types.ToDiskType(v.DiskType) + vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType) + vl.EnsureCorrectWritables(&v) + } return } diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go index 93b39bb5d..022eeb578 100644 --- a/weed/topology/topology_ec.go +++ b/weed/topology/topology_ec.go @@ -18,6 +18,7 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf for _, shardInfo := range shardInfos { shards = append(shards, erasure_coding.NewEcVolumeInfo( + shardInfo.DiskType, shardInfo.Collection, needle.VolumeId(shardInfo.Id), erasure_coding.ShardBits(shardInfo.EcIndexBits))) @@ -39,6 +40,7 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards for _, shardInfo := range newEcShards { newShards = append(newShards, erasure_coding.NewEcVolumeInfo( + shardInfo.DiskType, shardInfo.Collection, needle.VolumeId(shardInfo.Id), erasure_coding.ShardBits(shardInfo.EcIndexBits))) @@ -46,6 +48,7 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards for _, shardInfo := range deletedEcShards { deletedShards = append(deletedShards, erasure_coding.NewEcVolumeInfo( + shardInfo.DiskType, shardInfo.Collection, needle.VolumeId(shardInfo.Id), erasure_coding.ShardBits(shardInfo.EcIndexBits))) diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index 068bd401e..543dacf29 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -1,6 +1,7 @@ package topology import ( + "github.com/chrislusf/seaweedfs/weed/storage/types" "google.golang.org/grpc" "math/rand" "time" @@ -37,7 +38,8 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g }() } func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool { - vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl) + diskType := types.ToDiskType(volumeInfo.DiskType) + vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType) if !vl.SetVolumeCapacityFull(volumeInfo.Id) { return false } @@ -47,7 +49,13 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool { for _, dn := range vl.vid2location[volumeInfo.Id].list { if 
!volumeInfo.ReadOnly { - dn.UpAdjustActiveVolumeCountDelta(-1) + + disk := dn.getOrCreateDisk(volumeInfo.DiskType) + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(volumeInfo.DiskType)) + deltaDiskUsage.activeVolumeCount = -1 + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) + } } return true @@ -55,13 +63,14 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool { func (t *Topology) UnRegisterDataNode(dn *DataNode) { for _, v := range dn.GetVolumes() { glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id()) - vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl) + diskType := types.ToDiskType(v.DiskType) + vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType) vl.SetVolumeUnavailable(dn, v.Id) } - dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount()) - dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount()) - dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount()) - dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount()) + + negativeUsages := dn.GetDiskUsages().negative() + dn.UpAdjustDiskUsageDelta(negativeUsages) + if dn.Parent() != nil { dn.Parent().UnlinkChildNode(dn.Id()) } diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go index 73c55d77d..0fedb6221 100644 --- a/weed/topology/topology_map.go +++ b/weed/topology/topology_map.go @@ -4,8 +4,8 @@ import "github.com/chrislusf/seaweedfs/weed/pb/master_pb" func (t *Topology) ToMap() interface{} { m := make(map[string]interface{}) - m["Max"] = t.GetMaxVolumeCount() - m["Free"] = t.FreeSpace() + m["Max"] = t.diskUsages.GetMaxVolumeCount() + m["Free"] = t.diskUsages.FreeSpace() var dcs []interface{} for _, c := range t.Children() { dc := c.(*DataCenter) @@ -29,8 +29,8 @@ func (t *Topology) ToMap() interface{} { func (t *Topology) ToVolumeMap() interface{} { m := make(map[string]interface{}) - m["Max"] = t.GetMaxVolumeCount() - m["Free"] = t.FreeSpace() + m["Max"] = t.diskUsages.GetMaxVolumeCount() + m["Free"] = t.diskUsages.FreeSpace() dcs := make(map[NodeId]interface{}) for _, c := range t.Children() { dc := c.(*DataCenter) @@ -80,12 +80,8 @@ func (t *Topology) ToVolumeLocations() (volumeLocations []*master_pb.VolumeLocat func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo { m := &master_pb.TopologyInfo{ - Id: string(t.Id()), - VolumeCount: uint64(t.GetVolumeCount()), - MaxVolumeCount: uint64(t.GetMaxVolumeCount()), - FreeVolumeCount: uint64(t.FreeSpace()), - ActiveVolumeCount: uint64(t.GetActiveVolumeCount()), - RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()), + Id: string(t.Id()), + DiskInfos: t.diskUsages.ToDiskInfo(), } for _, c := range t.Children() { dc := c.(*DataCenter) diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go index e7676ccf7..ecfe9d8d1 100644 --- a/weed/topology/topology_test.go +++ b/weed/topology/topology_test.go @@ -6,6 +6,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" "testing" ) @@ -13,21 +14,24 @@ import ( func TestRemoveDataCenter(t *testing.T) { topo := setup(topologyLayout) topo.UnlinkChildNode(NodeId("dc2")) - if topo.GetActiveVolumeCount() != 15 { + if topo.diskUsages.usages[types.HardDriveType].activeVolumeCount != 15 { t.Fail() } topo.UnlinkChildNode(NodeId("dc3")) - if topo.GetActiveVolumeCount() != 12 { + if 
topo.diskUsages.usages[types.HardDriveType].activeVolumeCount != 12 { t.Fail() } } func TestHandlingVolumeServerHeartbeat(t *testing.T) { - topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5) + topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false) dc := topo.GetOrCreateDataCenter("dc1") rack := dc.GetOrCreateRack("rack1") - dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25) + maxVolumeCounts := make(map[string]uint32) + maxVolumeCounts[""] = 25 + maxVolumeCounts["ssd"] = 12 + dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", maxVolumeCounts) { volumeCount := 7 @@ -48,10 +52,30 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { volumeMessages = append(volumeMessages, volumeMessage) } + for k := 1; k <= volumeCount; k++ { + volumeMessage := &master_pb.VolumeInformationMessage{ + Id: uint32(volumeCount + k), + Size: uint64(25432), + Collection: "", + FileCount: uint64(2343), + DeleteCount: uint64(345), + DeletedByteCount: 34524, + ReadOnly: false, + ReplicaPlacement: uint32(0), + Version: uint32(needle.CurrentVersion), + Ttl: 0, + DiskType: "ssd", + } + volumeMessages = append(volumeMessages, volumeMessage) + } + topo.SyncDataNodeRegistration(volumeMessages, dn) - assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount) - assert(t, "volumeCount", int(topo.volumeCount), volumeCount) + usageCounts := topo.diskUsages.usages[types.HardDriveType] + + assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount) + assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount) + assert(t, "ssdVolumeCount", int(topo.diskUsages.usages[types.SsdType].volumeCount), volumeCount) } { @@ -78,8 +102,10 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { //layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL) //assert(t, "writables", len(layout.writables), volumeCount) - assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount) - assert(t, "volumeCount", int(topo.volumeCount), volumeCount) + usageCounts := topo.diskUsages.usages[types.HardDriveType] + + assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount) + assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount) } { @@ -96,26 +122,28 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { nil, dn) rp, _ := super_block.NewReplicaPlacementFromString("000") - layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL) + layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, types.HardDriveType) assert(t, "writables after repeated add", len(layout.writables), volumeCount) - assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount) - assert(t, "volumeCount", int(topo.volumeCount), volumeCount) + usageCounts := topo.diskUsages.usages[types.HardDriveType] + + assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount) + assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount) topo.IncrementalSyncDataNodeRegistration( nil, []*master_pb.VolumeShortInformationMessage{newVolumeShortMessage}, dn) assert(t, "writables after deletion", len(layout.writables), volumeCount-1) - assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount-1) - assert(t, "volumeCount", int(topo.volumeCount), volumeCount-1) + assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount-1) + assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount-1) topo.IncrementalSyncDataNodeRegistration( 
[]*master_pb.VolumeShortInformationMessage{newVolumeShortMessage}, nil, dn) - for vid, _ := range layout.vid2location { + for vid := range layout.vid2location { println("after add volume id", vid) } for _, vid := range layout.writables { @@ -128,7 +156,9 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { topo.UnRegisterDataNode(dn) - assert(t, "activeVolumeCount2", int(topo.activeVolumeCount), 0) + usageCounts := topo.diskUsages.usages[types.HardDriveType] + + assert(t, "activeVolumeCount2", int(usageCounts.activeVolumeCount), 0) } @@ -140,16 +170,20 @@ func assert(t *testing.T, message string, actual, expected int) { func TestAddRemoveVolume(t *testing.T) { - topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5) + topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false) dc := topo.GetOrCreateDataCenter("dc1") rack := dc.GetOrCreateRack("rack1") - dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25) + maxVolumeCounts := make(map[string]uint32) + maxVolumeCounts[""] = 25 + maxVolumeCounts["ssd"] = 12 + dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", maxVolumeCounts) v := storage.VolumeInfo{ Id: needle.VolumeId(1), Size: 100, Collection: "xcollection", + DiskType: "ssd", FileCount: 123, DeleteCount: 23, DeletedByteCount: 45, diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index ca626e973..9feb55b73 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -5,15 +5,16 @@ import ( "sync/atomic" "time" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) -func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, +func (t *Topology) batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vid needle.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) (*VolumeLocationList, bool) { ch := make(chan int, locationlist.Length()) errCount := int32(0) @@ -41,19 +42,23 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi }(index, dn.Url(), vid) } vacuumLocationList := NewVolumeLocationList() + + waitTimeout := time.NewTimer(time.Minute * time.Duration(t.volumeSizeLimit/1024/1024/1000+1)) + defer waitTimeout.Stop() + for range locationlist.list { select { case index := <-ch: if index != -1 { vacuumLocationList.list = append(vacuumLocationList.list, locationlist.list[index]) } - case <-time.After(30 * time.Minute): + case <-waitTimeout.C: return vacuumLocationList, false } } return vacuumLocationList, errCount == 0 && len(vacuumLocationList.list) > 0 } -func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, +func (t *Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool { vl.accessLock.Lock() vl.removeFromWritable(vid) @@ -65,7 +70,8 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), 
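 			// note: the check phase above replaces the old fixed 30-minute wait with a timer
 			// scaled to the volume size limit: minutes = volumeSizeLimit/1024/1024/1000 + 1,
 			// roughly one minute per GB (a 30GiB limit waits up to 31 minutes); the compact
 			// phase below uses three times that budget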
&volume_server_pb.VacuumVolumeCompactRequest{ - VolumeId: uint32(vid), + VolumeId: uint32(vid), + Preallocate: preallocate, }) return err }) @@ -79,24 +85,32 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, }(index, dn.Url(), vid) } isVacuumSuccess := true + + waitTimeout := time.NewTimer(3 * time.Minute * time.Duration(t.volumeSizeLimit/1024/1024/1000+1)) + defer waitTimeout.Stop() + for range locationlist.list { select { case canCommit := <-ch: isVacuumSuccess = isVacuumSuccess && canCommit - case <-time.After(30 * time.Minute): + case <-waitTimeout.C: return false } } return isVacuumSuccess } -func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) bool { +func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) bool { isCommitSuccess := true + isReadOnly := false for _, dn := range locationlist.list { glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ + resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ VolumeId: uint32(vid), }) + if resp != nil && resp.IsReadOnly { + isReadOnly = true + } return err }) if err != nil { @@ -105,13 +119,15 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v } else { glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url()) } - if isCommitSuccess { - vl.SetVolumeAvailable(dn, vid) + } + if isCommitSuccess { + for _, dn := range locationlist.list { + vl.SetVolumeAvailable(dn, vid, isReadOnly) } } return isCommitSuccess } -func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) { +func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) { for _, dn := range locationlist.list { glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { @@ -128,12 +144,12 @@ func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, } } -func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) int { +func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) { // if there is vacuum going on, return immediately swapped := atomic.CompareAndSwapInt64(&t.vacuumLockCounter, 0, 1) if !swapped { - return 0 + return } defer atomic.StoreInt64(&t.vacuumLockCounter, 0) @@ -145,39 +161,37 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float for _, vl := range c.storageType2VolumeLayout.Items() { if vl != nil { volumeLayout := vl.(*VolumeLayout) - vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate) + t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate) } } } - return 0 } -func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate 
int64) { +func (t *Topology) vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) { volumeLayout.accessLock.RLock() tmpMap := make(map[needle.VolumeId]*VolumeLocationList) for vid, locationList := range volumeLayout.vid2location { - tmpMap[vid] = locationList + tmpMap[vid] = locationList.Copy() } volumeLayout.accessLock.RUnlock() for vid, locationList := range tmpMap { volumeLayout.accessLock.RLock() - isReadOnly, hasValue := volumeLayout.readonlyVolumes[vid] + isReadOnly := volumeLayout.readonlyVolumes.IsTrue(vid) volumeLayout.accessLock.RUnlock() - if hasValue && isReadOnly { + if isReadOnly { continue } glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) - if vacuumLocationList, needVacuum := batchVacuumVolumeCheck( - grpcDialOption, volumeLayout, vid, locationList, garbageThreshold); needVacuum { - if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) { - batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList) + if vacuumLocationList, needVacuum := t.batchVacuumVolumeCheck(grpcDialOption, vid, locationList, garbageThreshold); needVacuum { + if t.batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) { + t.batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList) } else { - batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList) + t.batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList) } } } diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 80fbc86cd..8941a049b 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -1,12 +1,15 @@ package topology import ( + "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "math/rand" "sync" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" @@ -23,14 +26,15 @@ This package is created to resolve these replica placement issues: */ type VolumeGrowOption struct { - Collection string - ReplicaPlacement *super_block.ReplicaPlacement - Ttl *needle.TTL - Prealloacte int64 - DataCenter string - Rack string - DataNode string - MemoryMapMaxSizeMb uint32 + Collection string `json:"collection,omitempty"` + ReplicaPlacement *super_block.ReplicaPlacement `json:"replication,omitempty"` + Ttl *needle.TTL `json:"ttl,omitempty"` + DiskType types.DiskType `json:"disk,omitempty"` + Prealloacte int64 `json:"prealloacte,omitempty"` + DataCenter string `json:"dataCenter,omitempty"` + Rack string `json:"rack,omitempty"` + DataNode string `json:"dataNode,omitempty"` + MemoryMapMaxSizeMb uint32 `json:"memoryMapMaxSizeMb,omitempty"` } type VolumeGrowth struct { @@ -38,7 +42,8 @@ type VolumeGrowth struct { } func (o *VolumeGrowOption) String() string { - return fmt.Sprintf("Collection:%s, ReplicaPlacement:%v, Ttl:%v, DataCenter:%s, Rack:%s, DataNode:%s", o.Collection, o.ReplicaPlacement, o.Ttl, o.DataCenter, o.Rack, o.DataNode) + blob, _ := json.Marshal(o) + return string(blob) } func NewDefaultVolumeGrowth() *VolumeGrowth { @@ -48,15 +53,20 @@ func NewDefaultVolumeGrowth() *VolumeGrowth { // one replication type may need rp.GetCopyCount() actual volumes // given copyCount, how many logical volumes to create func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) { + v := 
util.GetViper() + v.SetDefault("master.volume_growth.copy_1", 7) + v.SetDefault("master.volume_growth.copy_2", 6) + v.SetDefault("master.volume_growth.copy_3", 3) + v.SetDefault("master.volume_growth.copy_other", 1) switch copyCount { case 1: - count = 7 + count = v.GetInt("master.volume_growth.copy_1") case 2: - count = 6 + count = v.GetInt("master.volume_growth.copy_2") case 3: - count = 3 + count = v.GetInt("master.volume_growth.copy_3") default: - count = 1 + count = v.GetInt("master.volume_growth.copy_other") } return } @@ -79,6 +89,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targe if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil { counter += c } else { + glog.V(0).Infof("create %d volume, created %d: %v", targetCount, counter, e) return counter, e } } @@ -106,21 +117,21 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) { //find main datacenter and other data centers rp := option.ReplicaPlacement - mainDataCenter, otherDataCenters, dc_err := topo.RandomlyPickNodes(rp.DiffDataCenterCount+1, func(node Node) error { + mainDataCenter, otherDataCenters, dc_err := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, option, func(node Node) error { if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) { return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter) } if len(node.Children()) < rp.DiffRackCount+1 { return fmt.Errorf("Only has %d racks, not enough for %d.", len(node.Children()), rp.DiffRackCount+1) } - if node.FreeSpace() < int64(rp.DiffRackCount+rp.SameRackCount+1) { - return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), rp.DiffRackCount+rp.SameRackCount+1) + if node.AvailableSpaceFor(option) < int64(rp.DiffRackCount+rp.SameRackCount+1) { + return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), rp.DiffRackCount+rp.SameRackCount+1) } possibleRacksCount := 0 for _, rack := range node.Children() { possibleDataNodesCount := 0 for _, n := range rack.Children() { - if n.FreeSpace() >= 1 { + if n.AvailableSpaceFor(option) >= 1 { possibleDataNodesCount++ } } @@ -138,12 +149,12 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } //find main rack and other racks - mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).RandomlyPickNodes(rp.DiffRackCount+1, func(node Node) error { + mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, option, func(node Node) error { if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) { return fmt.Errorf("Not matching preferred rack:%s", option.Rack) } - if node.FreeSpace() < int64(rp.SameRackCount+1) { - return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), rp.SameRackCount+1) + if node.AvailableSpaceFor(option) < int64(rp.SameRackCount+1) { + return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), rp.SameRackCount+1) } if len(node.Children()) < rp.SameRackCount+1 { // a bit faster way to test free racks @@ -151,7 +162,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } possibleDataNodesCount := 0 for _, n := range node.Children() { - if n.FreeSpace() >= 1 { + if n.AvailableSpaceFor(option) >= 1 { possibleDataNodesCount++ } } @@ -165,12 +176,12 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo 
*Topology, option *Volum } //find main rack and other racks - mainServer, otherServers, serverErr := mainRack.(*Rack).RandomlyPickNodes(rp.SameRackCount+1, func(node Node) error { + mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, option, func(node Node) error { if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) { return fmt.Errorf("Not matching preferred data node:%s", option.DataNode) } - if node.FreeSpace() < 1 { - return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), 1) + if node.AvailableSpaceFor(option) < 1 { + return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), 1) } return nil }) @@ -183,16 +194,16 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum servers = append(servers, server.(*DataNode)) } for _, rack := range otherRacks { - r := rand.Int63n(rack.FreeSpace()) - if server, e := rack.ReserveOneVolume(r); e == nil { + r := rand.Int63n(rack.AvailableSpaceFor(option)) + if server, e := rack.ReserveOneVolume(r, option); e == nil { servers = append(servers, server) } else { return servers, e } } for _, datacenter := range otherDataCenters { - r := rand.Int63n(datacenter.FreeSpace()) - if server, e := datacenter.ReserveOneVolume(r); e == nil { + r := rand.Int63n(datacenter.AvailableSpaceFor(option)) + if server, e := datacenter.ReserveOneVolume(r, option); e == nil { servers = append(servers, server) } else { return servers, e @@ -211,6 +222,7 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid ReplicaPlacement: option.ReplicaPlacement, Ttl: option.Ttl, Version: needle.CurrentVersion, + DiskType: string(option.DiskType), } server.AddOrUpdateVolume(vi) topo.RegisterVolumeLayout(vi, server) diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index e3c5cc580..ab30cd43f 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -81,7 +81,7 @@ func setup(topologyLayout string) *Topology { fmt.Println("data:", data) //need to connect all nodes first before server adding volumes - topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5) + topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false) mTopology := data.(map[string]interface{}) for dcKey, dcValue := range mTopology { dc := NewDataCenter(dcKey) @@ -103,7 +103,13 @@ func setup(topologyLayout string) *Topology { Version: needle.CurrentVersion} server.AddOrUpdateVolume(vi) } - server.UpAdjustMaxVolumeCountDelta(int64(serverMap["limit"].(float64))) + + disk := server.getOrCreateDisk("") + deltaDiskUsages := newDiskUsages() + deltaDiskUsage := deltaDiskUsages.getOrCreateDisk("") + deltaDiskUsage.maxVolumeCount = int64(serverMap["limit"].(float64)) + disk.UpAdjustDiskUsageDelta(deltaDiskUsages) + } } } @@ -131,3 +137,212 @@ func TestFindEmptySlotsForOneVolume(t *testing.T) { fmt.Println("assigned node :", server.Id()) } } + +var topologyLayout2 = ` +{ + "dc1":{ + "rack1":{ + "server111":{ + "volumes":[ + {"id":1, "size":12312}, + {"id":2, "size":12312}, + {"id":3, "size":12312} + ], + "limit":300 + }, + "server112":{ + "volumes":[ + {"id":4, "size":12312}, + {"id":5, "size":12312}, + {"id":6, "size":12312} + ], + "limit":300 + }, + "server113":{ + "volumes":[], + "limit":300 + }, + "server114":{ + "volumes":[], + "limit":300 + }, + "server115":{ + "volumes":[], + "limit":300 + }, + "server116":{ + "volumes":[], + "limit":300 + } + }, + "rack2":{ + "server121":{ 
+ "volumes":[ + {"id":4, "size":12312}, + {"id":5, "size":12312}, + {"id":6, "size":12312} + ], + "limit":300 + }, + "server122":{ + "volumes":[], + "limit":300 + }, + "server123":{ + "volumes":[ + {"id":2, "size":12312}, + {"id":3, "size":12312}, + {"id":4, "size":12312} + ], + "limit":300 + }, + "server124":{ + "volumes":[], + "limit":300 + }, + "server125":{ + "volumes":[], + "limit":300 + }, + "server126":{ + "volumes":[], + "limit":300 + } + }, + "rack3":{ + "server131":{ + "volumes":[], + "limit":300 + }, + "server132":{ + "volumes":[], + "limit":300 + }, + "server133":{ + "volumes":[], + "limit":300 + }, + "server134":{ + "volumes":[], + "limit":300 + }, + "server135":{ + "volumes":[], + "limit":300 + }, + "server136":{ + "volumes":[], + "limit":300 + } + } + } +} +` + +func TestReplication011(t *testing.T) { + topo := setup(topologyLayout2) + vg := NewDefaultVolumeGrowth() + rp, _ := super_block.NewReplicaPlacementFromString("011") + volumeGrowOption := &VolumeGrowOption{ + Collection: "MAIL", + ReplicaPlacement: rp, + DataCenter: "dc1", + Rack: "", + DataNode: "", + } + servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) + if err != nil { + fmt.Println("finding empty slots error :", err) + t.Fail() + } + for _, server := range servers { + fmt.Println("assigned node :", server.Id()) + } +} + +var topologyLayout3 = ` +{ + "dc1":{ + "rack1":{ + "server111":{ + "volumes":[], + "limit":2000 + } + } + }, + "dc2":{ + "rack2":{ + "server222":{ + "volumes":[], + "limit":2000 + } + } + }, + "dc3":{ + "rack3":{ + "server333":{ + "volumes":[], + "limit":1000 + } + } + }, + "dc4":{ + "rack4":{ + "server444":{ + "volumes":[], + "limit":1000 + } + } + }, + "dc5":{ + "rack5":{ + "server555":{ + "volumes":[], + "limit":500 + } + } + }, + "dc6":{ + "rack6":{ + "server666":{ + "volumes":[], + "limit":500 + } + } + } +} +` + +func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { + topo := setup(topologyLayout3) + vg := NewDefaultVolumeGrowth() + rp, _ := super_block.NewReplicaPlacementFromString("100") + volumeGrowOption := &VolumeGrowOption{ + Collection: "Weight", + ReplicaPlacement: rp, + DataCenter: "", + Rack: "", + DataNode: "", + } + + distribution := map[NodeId]int{} + // assign 1000 volumes + for i := 0; i < 1000; i++ { + servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) + if err != nil { + fmt.Println("finding empty slots error :", err) + t.Fail() + } + for _, server := range servers { + // fmt.Println("assigned node :", server.Id()) + if _, ok := distribution[server.id]; !ok { + distribution[server.id] = 0 + } + distribution[server.id] += 1 + } + } + + for k, v := range distribution { + fmt.Printf("%s : %d\n", k, v) + } +} diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index 7633b28be..c7e171248 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -3,6 +3,7 @@ package topology import ( "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "math/rand" "sync" "time" @@ -13,15 +14,103 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) +type copyState int + +const ( + noCopies copyState = 0 + iota + insufficientCopies + enoughCopies +) + +type volumeState string + +const ( + readOnlyState volumeState = "ReadOnly" + oversizedState = "Oversized" +) + +type stateIndicator func(copyState) bool + +func ExistCopies() stateIndicator { + return func(state copyState) bool { return state != noCopies } +} + +func NoCopies() stateIndicator { + return 
func(state copyState) bool { return state == noCopies } +} + +type volumesBinaryState struct { + rp *super_block.ReplicaPlacement + name volumeState // the name for volume state (eg. "Readonly", "Oversized") + indicator stateIndicator // indicate whether the volumes should be marked as `name` + copyMap map[needle.VolumeId]*VolumeLocationList +} + +func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState { + return &volumesBinaryState{ + rp: rp, + name: name, + indicator: indicator, + copyMap: make(map[needle.VolumeId]*VolumeLocationList), + } +} + +func (v *volumesBinaryState) Dump() (res []uint32) { + for vid, list := range v.copyMap { + if v.indicator(v.copyState(list)) { + res = append(res, uint32(vid)) + } + } + return +} + +func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool { + list, _ := v.copyMap[vid] + return v.indicator(v.copyState(list)) +} + +func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) { + list, _ := v.copyMap[vid] + if list != nil { + list.Set(dn) + return + } + list = NewVolumeLocationList() + list.Set(dn) + v.copyMap[vid] = list +} + +func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) { + list, _ := v.copyMap[vid] + if list != nil { + list.Remove(dn) + if list.Length() == 0 { + delete(v.copyMap, vid) + } + } +} + +func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState { + if list == nil { + return noCopies + } + if list.Length() < v.rp.GetCopyCount() { + return insufficientCopies + } + return enoughCopies +} + // mapping from volume to its locations, inverted from server to volume type VolumeLayout struct { rp *super_block.ReplicaPlacement ttl *needle.TTL + diskType types.DiskType vid2location map[needle.VolumeId]*VolumeLocationList - writables []needle.VolumeId // transient array of writable volume id - readonlyVolumes map[needle.VolumeId]bool // transient set of readonly volumes - oversizedVolumes map[needle.VolumeId]bool // set of oversized volumes + writables []needle.VolumeId // transient array of writable volume id + readonlyVolumes *volumesBinaryState // readonly volumes + oversizedVolumes *volumesBinaryState // oversized volumes volumeSizeLimit uint64 + replicationAsMin bool accessLock sync.RWMutex } @@ -31,19 +120,23 @@ type VolumeLayoutStats struct { FileCount uint64 } -func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout { +func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout { return &VolumeLayout{ rp: rp, ttl: ttl, + diskType: diskType, vid2location: make(map[needle.VolumeId]*VolumeLocationList), writables: *new([]needle.VolumeId), - readonlyVolumes: make(map[needle.VolumeId]bool), - oversizedVolumes: make(map[needle.VolumeId]bool), + readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()), + oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()), volumeSizeLimit: volumeSizeLimit, + replicationAsMin: replicationAsMin, } } func (vl *VolumeLayout) String() string { + vl.accessLock.RLock() + defer vl.accessLock.RUnlock() return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit) } @@ -51,6 +144,8 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) { vl.accessLock.Lock() defer vl.accessLock.Unlock() + defer 
vl.rememberOversizedVolume(v, dn) + if _, ok := vl.vid2location[v.Id]; !ok { vl.vid2location[v.Id] = NewVolumeLocationList() } @@ -61,27 +156,26 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) { if vInfo.ReadOnly { glog.V(1).Infof("vid %d removed from writable", v.Id) vl.removeFromWritable(v.Id) - vl.readonlyVolumes[v.Id] = true + vl.readonlyVolumes.Add(v.Id, dn) return } else { - delete(vl.readonlyVolumes, v.Id) + vl.readonlyVolumes.Remove(v.Id, dn) } } else { glog.V(1).Infof("vid %d removed from writable", v.Id) vl.removeFromWritable(v.Id) - delete(vl.readonlyVolumes, v.Id) + vl.readonlyVolumes.Remove(v.Id, dn) return } } - vl.rememberOversizedVolume(v) - vl.ensureCorrectWritables(v) - } -func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo) { +func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) { if vl.isOversized(v) { - vl.oversizedVolumes[v.Id] = true + vl.oversizedVolumes.Add(v.Id, dn) + } else { + vl.oversizedVolumes.Remove(v.Id, dn) } } @@ -97,7 +191,9 @@ func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) { if location.Remove(dn) { - vl.ensureCorrectWritables(v) + vl.readonlyVolumes.Remove(v.Id, dn) + vl.oversizedVolumes.Remove(v.Id, dn) + vl.ensureCorrectWritables(v.Id) if location.Length() == 0 { delete(vl.vid2location, v.Id) @@ -106,23 +202,32 @@ func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) { } } -func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) { - if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) { - if _, ok := vl.oversizedVolumes[v.Id]; !ok { - vl.addToWritable(v.Id) +func (vl *VolumeLayout) EnsureCorrectWritables(v *storage.VolumeInfo) { + vl.accessLock.Lock() + defer vl.accessLock.Unlock() + + vl.ensureCorrectWritables(v.Id) +} + +func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) { + if vl.enoughCopies(vid) && vl.isAllWritable(vid) { + if !vl.oversizedVolumes.IsTrue(vid) { + vl.setVolumeWritable(vid) } } else { - vl.removeFromWritable(v.Id) + vl.removeFromWritable(vid) } } -func (vl *VolumeLayout) addToWritable(vid needle.VolumeId) { - for _, id := range vl.writables { - if vid == id { - return +func (vl *VolumeLayout) isAllWritable(vid needle.VolumeId) bool { + for _, dn := range vl.vid2location[vid].list { + if v, getError := dn.GetVolumesById(vid); getError == nil { + if v.ReadOnly { + return false + } } } - vl.writables = append(vl.writables, vid) + return true } func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool { @@ -258,6 +363,8 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) if location, ok := vl.vid2location[vid]; ok { if location.Remove(dn) { + vl.readonlyVolumes.Remove(vid, dn) + vl.oversizedVolumes.Remove(vid, dn) if location.Length() < vl.rp.GetCopyCount() { glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount()) return vl.removeFromWritable(vid) @@ -266,17 +373,33 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) } return false } -func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId) bool { +func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool { vl.accessLock.Lock() defer vl.accessLock.Unlock() + vInfo, err := dn.GetVolumesById(vid) + if err != nil { + return false + } + vl.vid2location[vid].Set(dn) - if vl.vid2location[vid].Length() == 
vl.rp.GetCopyCount() {
+
+	if vInfo.ReadOnly || isReadOnly {
+		return false
+	}
+
+	if vl.enoughCopies(vid) {
 		return vl.setVolumeWritable(vid)
 	}
 	return false
 }
 
+func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
+	locations := vl.vid2location[vid].Length()
+	desired := vl.rp.GetCopyCount()
+	return locations == desired || (vl.replicationAsMin && locations > desired)
+}
+
 func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
 	vl.accessLock.Lock()
 	defer vl.accessLock.Unlock()
@@ -306,10 +429,10 @@ func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
 			size, fileCount := vll.Stats(vid, freshThreshold)
 			ret.FileCount += uint64(fileCount)
 			ret.UsedSize += size
-			if vl.readonlyVolumes[vid] {
+			if vl.readonlyVolumes.IsTrue(vid) {
 				ret.TotalSize += size
 			} else {
-				ret.TotalSize += vl.volumeSizeLimit
+				ret.TotalSize += vl.volumeSizeLimit * uint64(vll.Length())
 			}
 		}
diff --git a/weed/topology/volume_layout_test.go b/weed/topology/volume_layout_test.go
new file mode 100644
index 000000000..e148d6107
--- /dev/null
+++ b/weed/topology/volume_layout_test.go
@@ -0,0 +1,116 @@
+package topology
+
+import (
+	"testing"
+
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+func TestVolumesBinaryState(t *testing.T) {
+	vids := []needle.VolumeId{
+		needle.VolumeId(1),
+		needle.VolumeId(2),
+		needle.VolumeId(3),
+		needle.VolumeId(4),
+		needle.VolumeId(5),
+	}
+
+	dns := []*DataNode{
+		&DataNode{
+			Ip:   "127.0.0.1",
+			Port: 8081,
+		},
+		&DataNode{
+			Ip:   "127.0.0.1",
+			Port: 8082,
+		},
+		&DataNode{
+			Ip:   "127.0.0.1",
+			Port: 8083,
+		},
+	}
+
+	rp, _ := super_block.NewReplicaPlacementFromString("002")
+
+	state_exist := NewVolumesBinaryState(readOnlyState, rp, ExistCopies())
+	state_exist.Add(vids[0], dns[0])
+	state_exist.Add(vids[0], dns[1])
+	state_exist.Add(vids[1], dns[2])
+	state_exist.Add(vids[2], dns[1])
+	state_exist.Add(vids[4], dns[1])
+	state_exist.Add(vids[4], dns[2])
+
+	state_no := NewVolumesBinaryState(readOnlyState, rp, NoCopies())
+	state_no.Add(vids[0], dns[0])
+	state_no.Add(vids[0], dns[1])
+	state_no.Add(vids[3], dns[1])
+
+	tests := []struct {
+		name                    string
+		state                   *volumesBinaryState
+		expectResult            []bool
+		update                  func()
+		expectResultAfterUpdate []bool
+	}{
+		{
+			name:         "mark true when copies exist",
+			state:        state_exist,
+			expectResult: []bool{true, true, true, false, true},
+			update: func() {
+				state_exist.Remove(vids[0], dns[2])
+				state_exist.Remove(vids[1], dns[2])
+				state_exist.Remove(vids[3], dns[2])
+				state_exist.Remove(vids[4], dns[1])
+				state_exist.Remove(vids[4], dns[2])
+			},
+			expectResultAfterUpdate: []bool{true, false, true, false, false},
+		},
+		{
+			name:         "mark true when no copies exist",
+			state:        state_no,
+			expectResult: []bool{false, true, true, false, true},
+			update: func() {
+				state_no.Remove(vids[0], dns[2])
+				state_no.Remove(vids[1], dns[2])
+				state_no.Add(vids[2], dns[1])
+				state_no.Remove(vids[3], dns[1])
+				state_no.Remove(vids[4], dns[2])
+			},
+			expectResultAfterUpdate: []bool{false, true, false, true, true},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			var result []bool
+			for index := range vids {
+				result = append(result, test.state.IsTrue(vids[index]))
+			}
+			if len(result) != len(test.expectResult) {
+				t.Fatalf("len(result) != len(expectResult), got %d, expected %d\n",
+					len(result), len(test.expectResult))
+			}
+			for index, val := range result {
+				if val != test.expectResult[index] {
+					t.Fatalf("result not matched, index %d, got %v, expect %v\n",
+						index, val, test.expectResult[index])
+				}
+			}
+			test.update()
+			var updateResult []bool
+			for index := range vids {
+				updateResult = append(updateResult, test.state.IsTrue(vids[index]))
+			}
+			if len(updateResult) != len(test.expectResultAfterUpdate) {
+				t.Fatalf("len(updateResult) != len(expectResultAfterUpdate), got %d, expected %d\n",
+					len(updateResult), len(test.expectResultAfterUpdate))
+			}
+			for index, val := range updateResult {
+				if val != test.expectResultAfterUpdate[index] {
+					t.Fatalf("update result not matched, index %d, got %v, expect %v\n",
+						index, val, test.expectResultAfterUpdate[index])
+				}
+			}
+		})
+	}
+}
diff --git a/weed/topology/volume_location_list.go b/weed/topology/volume_location_list.go
index 8905c54b5..548c4cd25 100644
--- a/weed/topology/volume_location_list.go
+++ b/weed/topology/volume_location_list.go
@@ -18,12 +18,23 @@ func (dnll *VolumeLocationList) String() string {
 	return fmt.Sprintf("%v", dnll.list)
 }
 
+func (dnll *VolumeLocationList) Copy() *VolumeLocationList {
+	list := make([]*DataNode, len(dnll.list))
+	copy(list, dnll.list)
+	return &VolumeLocationList{
+		list: list,
+	}
+}
+
 func (dnll *VolumeLocationList) Head() *DataNode {
 	//mark first node as master volume
 	return dnll.list[0]
 }
 
 func (dnll *VolumeLocationList) Length() int {
+	if dnll == nil {
+		return 0
+	}
 	return len(dnll.list)
 }
 
@@ -71,7 +82,7 @@ func (dnll *VolumeLocationList) Stats(vid needle.VolumeId, freshThreshHold int64
 		if dnl.LastSeen < freshThreshHold {
 			vinfo, err := dnl.GetVolumesById(vid)
 			if err == nil {
-				return vinfo.Size - vinfo.DeletedByteCount, vinfo.FileCount - vinfo.DeleteCount
+				return (vinfo.Size - vinfo.DeletedByteCount) * uint64(len(dnll.list)), vinfo.FileCount - vinfo.DeleteCount
 			}
 		}
 	}
diff --git a/weed/util/bounded_tree/bounded_tree.go b/weed/util/bounded_tree/bounded_tree.go
new file mode 100644
index 000000000..3a8a22a9c
--- /dev/null
+++ b/weed/util/bounded_tree/bounded_tree.go
@@ -0,0 +1,182 @@
+package bounded_tree
+
+import (
+	"sync"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type Node struct {
+	Parent   *Node
+	Name     string
+	Children map[string]*Node
+}
+
+type BoundedTree struct {
+	root *Node
+	sync.RWMutex
+	baseDir util.FullPath
+}
+
+func NewBoundedTree(baseDir util.FullPath) *BoundedTree {
+	return &BoundedTree{
+		root: &Node{
+			Name: "/",
+		},
+		baseDir: baseDir,
+	}
+}
+
+type VisitNodeFunc func(path util.FullPath) (childDirectories []string, err error)
+
+// If the path has not been visited, call visitFn for each level of directory.
+// No action if the directory has been visited before or does not exist.
+// A leaf node, which has no children, represents a directory not yet visited.
+// A non-leaf node, or a non-existing node, represents a directory that was already visited or does not need to be visited.
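+//
+// A minimal usage sketch (illustrative only; listDirs stands in for any callback
+// that returns the child directory names of a path):
+//
+//	tree := NewBoundedTree(util.FullPath("/"))
+//	if err := tree.EnsureVisited(util.FullPath("/a/b"), func(p util.FullPath) ([]string, error) {
+//		return listDirs(p)
+//	}); err != nil {
+//		glog.Warningf("visit /a/b: %v", err)
+//	}
+//	_ = tree.HasVisited(util.FullPath("/a")) // true once "/a" has been expanded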
+func (t *BoundedTree) EnsureVisited(p util.FullPath, visitFn VisitNodeFunc) (visitErr error) { + t.Lock() + defer t.Unlock() + + if t.root == nil { + return + } + if t.baseDir != "/" { + p = p[len(t.baseDir):] + } + components := p.Split() + // fmt.Printf("components %v %d\n", components, len(components)) + canDelete, err := t.ensureVisited(t.root, t.baseDir, components, 0, visitFn) + if err != nil { + return err + } + if canDelete { + t.root = nil + } + return nil +} + +func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, components []string, i int, visitFn VisitNodeFunc) (canDeleteNode bool, visitErr error) { + + // println("ensureVisited", currentPath, i) + + if n == nil { + // fmt.Printf("%s null\n", currentPath) + return + } + + if n.isVisited() { + // fmt.Printf("%s visited %v\n", currentPath, n.Name) + } else { + // fmt.Printf("ensure %v\n", currentPath) + + children, err := visitFn(currentPath) + if err != nil { + glog.V(0).Infof("failed to visit %s: %v", currentPath, err) + return false, err + } + + if len(children) == 0 { + // fmt.Printf(" canDelete %v without children\n", currentPath) + return true, nil + } + + n.Children = make(map[string]*Node) + for _, child := range children { + // fmt.Printf(" add child %v %v\n", currentPath, child) + n.Children[child] = &Node{ + Name: child, + } + } + } + + if i >= len(components) { + return + } + + // fmt.Printf(" check child %v %v\n", currentPath, components[i]) + + toVisitNode, found := n.Children[components[i]] + if !found { + // fmt.Printf(" did not find child %v %v\n", currentPath, components[i]) + return + } + + // fmt.Printf(" ensureVisited %v %v\n", currentPath, toVisitNode.Name) + canDelete, childVisitErr := t.ensureVisited(toVisitNode, currentPath.Child(components[i]), components, i+1, visitFn) + if childVisitErr != nil { + return false, childVisitErr + } + if canDelete { + + // fmt.Printf(" delete %v %v\n", currentPath, components[i]) + delete(n.Children, components[i]) + + if len(n.Children) == 0 { + // fmt.Printf(" canDelete %v\n", currentPath) + return true, nil + } + } + + return false, nil + +} + +func (n *Node) isVisited() bool { + if n == nil { + return true + } + if len(n.Children) > 0 { + return true + } + return false +} + +func (n *Node) getChild(childName string) *Node { + if n == nil { + return nil + } + if len(n.Children) > 0 { + return n.Children[childName] + } + return nil +} + +func (t *BoundedTree) HasVisited(p util.FullPath) bool { + + t.RLock() + defer t.RUnlock() + + if t.root == nil { + return true + } + + components := p.Split() + // fmt.Printf("components %v %d\n", components, len(components)) + return t.hasVisited(t.root, util.FullPath("/"), components, 0) +} + +func (t *BoundedTree) hasVisited(n *Node, currentPath util.FullPath, components []string, i int) bool { + + if n == nil { + return true + } + + if !n.isVisited() { + return false + } + + // fmt.Printf(" hasVisited child %v %+v %d\n", currentPath, components, i) + + if i >= len(components) { + return true + } + + toVisitNode, found := n.Children[components[i]] + if !found { + return true + } + + return t.hasVisited(toVisitNode, currentPath.Child(components[i]), components, i+1) + +} diff --git a/weed/util/bounded_tree/bounded_tree_test.go b/weed/util/bounded_tree/bounded_tree_test.go new file mode 100644 index 000000000..465f1cc9c --- /dev/null +++ b/weed/util/bounded_tree/bounded_tree_test.go @@ -0,0 +1,126 @@ +package bounded_tree + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + 
"github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + visitFn = func(path util.FullPath) (childDirectories []string, err error) { + fmt.Printf(" visit %v ...\n", path) + switch path { + case "/": + return []string{"a", "g", "h"}, nil + case "/a": + return []string{"b", "f"}, nil + case "/a/b": + return []string{"c", "e"}, nil + case "/a/b/c": + return []string{"d"}, nil + case "/a/b/c/d": + return []string{"i", "j"}, nil + case "/a/b/c/d/i": + return []string{}, nil + case "/a/b/c/d/j": + return []string{}, nil + case "/a/b/e": + return []string{}, nil + case "/a/f": + return []string{}, nil + } + return nil, nil + } + + printMap = func(m map[string]*Node) { + for k := range m { + println(" >", k) + } + } +) + +func TestBoundedTree(t *testing.T) { + + // a/b/c/d/i + // a/b/c/d/j + // a/b/c/d + // a/b/e + // a/f + // g + // h + + tree := NewBoundedTree(util.FullPath("/")) + + tree.EnsureVisited(util.FullPath("/a/b/c"), visitFn) + + assert.Equal(t, true, tree.HasVisited(util.FullPath("/a/b"))) + assert.Equal(t, true, tree.HasVisited(util.FullPath("/a/b/c"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/b/c/d"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/b/e"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/f"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/g"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/h"))) + assert.Equal(t, true, tree.HasVisited(util.FullPath("/"))) + assert.Equal(t, true, tree.HasVisited(util.FullPath("/x"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/a/b/e/x"))) + + printMap(tree.root.Children) + + a := tree.root.getChild("a") + + b := a.getChild("b") + if !b.isVisited() { + t.Errorf("expect visited /a/b") + } + c := b.getChild("c") + if !c.isVisited() { + t.Errorf("expect visited /a/b/c") + } + + d := c.getChild("d") + if d.isVisited() { + t.Errorf("expect unvisited /a/b/c/d") + } + + tree.EnsureVisited(util.FullPath("/a/b/c/d"), visitFn) + tree.EnsureVisited(util.FullPath("/a/b/c/d/i"), visitFn) + tree.EnsureVisited(util.FullPath("/a/b/c/d/j"), visitFn) + tree.EnsureVisited(util.FullPath("/a/b/e"), visitFn) + tree.EnsureVisited(util.FullPath("/a/f"), visitFn) + + printMap(tree.root.Children) + +} + +func TestEmptyBoundedTree(t *testing.T) { + + // g + // h + + tree := NewBoundedTree(util.FullPath("/")) + + visitFn := func(path util.FullPath) (childDirectories []string, err error) { + fmt.Printf(" visit %v ...\n", path) + switch path { + case "/": + return []string{"g", "h"}, nil + } + t.Fatalf("expected visit %s", path) + return nil, nil + } + + tree.EnsureVisited(util.FullPath("/a/b"), visitFn) + + tree.EnsureVisited(util.FullPath("/a/b"), visitFn) + + printMap(tree.root.Children) + + assert.Equal(t, true, tree.HasVisited(util.FullPath("/a/b"))) + assert.Equal(t, true, tree.HasVisited(util.FullPath("/a"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/g"))) + assert.Equal(t, false, tree.HasVisited(util.FullPath("/g/x"))) + +} diff --git a/weed/util/buffered_writer/buffered_writer.go b/weed/util/buffered_writer/buffered_writer.go new file mode 100644 index 000000000..73d9f4995 --- /dev/null +++ b/weed/util/buffered_writer/buffered_writer.go @@ -0,0 +1,52 @@ +package buffered_writer + +import ( + "bytes" + "io" +) + +var _ = io.WriteCloser(&BufferedWriteCloser{}) + +type BufferedWriteCloser struct { + buffer bytes.Buffer + bufferLimit int + position int64 + nextFlushOffset int64 + FlushFunc func([]byte, int64) error + CloseFunc func() error +} + +func 
NewBufferedWriteCloser(bufferLimit int) *BufferedWriteCloser { + return &BufferedWriteCloser{ + bufferLimit: bufferLimit, + } +} + +func (b *BufferedWriteCloser) Write(p []byte) (n int, err error) { + + if b.buffer.Len()+len(p) >= b.bufferLimit { + if err := b.FlushFunc(b.buffer.Bytes(), b.nextFlushOffset); err != nil { + return 0, err + } + b.nextFlushOffset += int64(b.buffer.Len()) + b.buffer.Reset() + } + + return b.buffer.Write(p) + +} + +func (b *BufferedWriteCloser) Close() error { + if b.buffer.Len() > 0 { + if err := b.FlushFunc(b.buffer.Bytes(), b.nextFlushOffset); err != nil { + return err + } + } + if b.CloseFunc != nil { + if err := b.CloseFunc(); err != nil { + return err + } + } + + return nil +} diff --git a/weed/util/bytes.go b/weed/util/bytes.go index dfa4ae665..c2a4df108 100644 --- a/weed/util/bytes.go +++ b/weed/util/bytes.go @@ -1,5 +1,30 @@ package util +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "encoding/base64" + "fmt" + "io" +) + +// BytesToHumanReadable returns the converted human readable representation of the bytes. +func BytesToHumanReadable(b uint64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + + div, exp := uint64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + + return fmt.Sprintf("%.2f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) +} + // big endian func BytesToUint64(b []byte) (v uint64) { @@ -43,3 +68,96 @@ func Uint16toBytes(b []byte, v uint16) { func Uint8toBytes(b []byte, v uint8) { b[0] = byte(v) } + +// returns a 64 bit big int +func HashStringToLong(dir string) (v int64) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + v += int64(b[0]) + v <<= 8 + v += int64(b[1]) + v <<= 8 + v += int64(b[2]) + v <<= 8 + v += int64(b[3]) + v <<= 8 + v += int64(b[4]) + v <<= 8 + v += int64(b[5]) + v <<= 8 + v += int64(b[6]) + v <<= 8 + v += int64(b[7]) + + return +} + +func HashToInt32(data []byte) (v int32) { + h := md5.New() + h.Write(data) + + b := h.Sum(nil) + + v += int32(b[0]) + v <<= 8 + v += int32(b[1]) + v <<= 8 + v += int32(b[2]) + v <<= 8 + v += int32(b[3]) + + return +} + +func Base64Encode(data []byte) string { + return base64.StdEncoding.EncodeToString(data) +} + +func Base64Md5(data []byte) string { + return Base64Encode(Md5(data)) +} + +func Md5(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +func Md5String(data []byte) string { + return fmt.Sprintf("%x", Md5(data)) +} + +func Base64Md5ToBytes(contentMd5 string) []byte { + data, err := base64.StdEncoding.DecodeString(contentMd5) + if err != nil { + return nil + } + return data +} + +func RandomInt32() int32 { + buf := make([]byte, 4) + rand.Read(buf) + return int32(BytesToUint32(buf)) +} + +func RandomBytes(byteCount int) []byte { + buf := make([]byte, byteCount) + rand.Read(buf) + return buf +} + +type BytesReader struct { + Bytes []byte + *bytes.Reader +} + +func NewBytesReader(b []byte) *BytesReader { + return &BytesReader{ + Bytes: b, + Reader: bytes.NewReader(b), + } +} diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go new file mode 100644 index 000000000..3615aee0e --- /dev/null +++ b/weed/util/chunk_cache/chunk_cache.go @@ -0,0 +1,133 @@ +package chunk_cache + +import ( + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +type ChunkCache interface { + GetChunk(fileId string, minSize uint64) (data []byte) + SetChunk(fileId string, data []byte) +} + +// a 
global cache for recently accessed file chunks +type TieredChunkCache struct { + memCache *ChunkCacheInMemory + diskCaches []*OnDiskCacheLayer + sync.RWMutex + onDiskCacheSizeLimit0 uint64 + onDiskCacheSizeLimit1 uint64 + onDiskCacheSizeLimit2 uint64 +} + +func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache { + + c := &TieredChunkCache{ + memCache: NewChunkCacheInMemory(maxEntries), + } + c.diskCaches = make([]*OnDiskCacheLayer, 3) + c.onDiskCacheSizeLimit0 = uint64(unitSize) + c.onDiskCacheSizeLimit1 = 4 * c.onDiskCacheSizeLimit0 + c.onDiskCacheSizeLimit2 = 2 * c.onDiskCacheSizeLimit1 + c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_2", diskSizeInUnit*unitSize/8, 2) + c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_3", diskSizeInUnit*unitSize/4+diskSizeInUnit*unitSize/8, 3) + c.diskCaches[2] = NewOnDiskCacheLayer(dir, "c2_2", diskSizeInUnit*unitSize/2, 2) + + return c +} + +func (c *TieredChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) { + if c == nil { + return + } + + c.RLock() + defer c.RUnlock() + + return c.doGetChunk(fileId, minSize) +} + +func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byte) { + + if minSize <= c.onDiskCacheSizeLimit0 { + data = c.memCache.GetChunk(fileId) + if len(data) >= int(minSize) { + return data + } + } + + fid, err := needle.ParseFileIdFromString(fileId) + if err != nil { + glog.Errorf("failed to parse file id %s", fileId) + return nil + } + + if minSize <= c.onDiskCacheSizeLimit0 { + data = c.diskCaches[0].getChunk(fid.Key) + if len(data) >= int(minSize) { + return data + } + } + if minSize <= c.onDiskCacheSizeLimit1 { + data = c.diskCaches[1].getChunk(fid.Key) + if len(data) >= int(minSize) { + return data + } + } + { + data = c.diskCaches[2].getChunk(fid.Key) + if len(data) >= int(minSize) { + return data + } + } + + return nil + +} + +func (c *TieredChunkCache) SetChunk(fileId string, data []byte) { + if c == nil { + return + } + c.Lock() + defer c.Unlock() + + glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data)) + + c.doSetChunk(fileId, data) +} + +func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) { + + if len(data) <= int(c.onDiskCacheSizeLimit0) { + c.memCache.SetChunk(fileId, data) + } + + fid, err := needle.ParseFileIdFromString(fileId) + if err != nil { + glog.Errorf("failed to parse file id %s", fileId) + return + } + + if len(data) <= int(c.onDiskCacheSizeLimit0) { + c.diskCaches[0].setChunk(fid.Key, data) + } else if len(data) <= int(c.onDiskCacheSizeLimit1) { + c.diskCaches[1].setChunk(fid.Key, data) + } else { + c.diskCaches[2].setChunk(fid.Key, data) + } + +} + +func (c *TieredChunkCache) Shutdown() { + if c == nil { + return + } + c.Lock() + defer c.Unlock() + for _, diskCache := range c.diskCaches { + diskCache.shutdown() + } +} diff --git a/weed/util/chunk_cache/chunk_cache_in_memory.go b/weed/util/chunk_cache/chunk_cache_in_memory.go new file mode 100644 index 000000000..5f26b8c78 --- /dev/null +++ b/weed/util/chunk_cache/chunk_cache_in_memory.go @@ -0,0 +1,38 @@ +package chunk_cache + +import ( + "time" + + "github.com/karlseguin/ccache/v2" +) + +// a global cache for recently accessed file chunks +type ChunkCacheInMemory struct { + cache *ccache.Cache +} + +func NewChunkCacheInMemory(maxEntries int64) *ChunkCacheInMemory { + pruneCount := maxEntries >> 3 + if pruneCount <= 0 { + pruneCount = 500 + } + return &ChunkCacheInMemory{ + cache: 
ccache.New(ccache.Configure().MaxSize(maxEntries).ItemsToPrune(uint32(pruneCount))), + } +} + +func (c *ChunkCacheInMemory) GetChunk(fileId string) []byte { + item := c.cache.Get(fileId) + if item == nil { + return nil + } + data := item.Value().([]byte) + item.Extend(time.Hour) + return data +} + +func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) { + localCopy := make([]byte, len(data)) + copy(localCopy, data) + c.cache.Set(fileId, localCopy, time.Hour) +} diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go new file mode 100644 index 000000000..6f87a9a06 --- /dev/null +++ b/weed/util/chunk_cache/chunk_cache_on_disk.go @@ -0,0 +1,147 @@ +package chunk_cache + +import ( + "fmt" + "os" + "time" + + "github.com/syndtr/goleveldb/leveldb/opt" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// This implements an on disk cache +// The entries are an FIFO with a size limit + +type ChunkCacheVolume struct { + DataBackend backend.BackendStorageFile + nm storage.NeedleMapper + fileName string + smallBuffer []byte + sizeLimit int64 + lastModTime time.Time + fileSize int64 +} + +func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) { + + v := &ChunkCacheVolume{ + smallBuffer: make([]byte, types.NeedlePaddingSize), + fileName: fileName, + sizeLimit: preallocate, + } + + var err error + + if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists { + if !canRead { + return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName) + } + if !canWrite { + return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName) + } + if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil { + return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err) + } else { + v.DataBackend = backend.NewDiskFile(dataFile) + v.lastModTime = modTime + v.fileSize = fileSize + } + } else { + if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil { + return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err) + } + v.lastModTime = time.Now() + } + + var indexFile *os.File + if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil { + return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err) + } + + glog.V(1).Infoln("loading leveldb", v.fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil { + return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err) + } + + return v, nil + +} + +func (v *ChunkCacheVolume) Shutdown() { + if v.DataBackend != nil { + v.DataBackend.Close() + v.DataBackend = nil + } + if v.nm != nil { + v.nm.Close() + v.nm = nil + } +} + +func (v *ChunkCacheVolume) doReset() { + v.Shutdown() + os.Truncate(v.fileName + ".dat", 0) + os.Truncate(v.fileName + ".idx", 0) + glog.V(4).Infof("cache removeAll %s ...", v.fileName + ".ldb") + os.RemoveAll(v.fileName + ".ldb") + glog.V(4).Infof("cache removed %s", v.fileName + 
".ldb") +} + +func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) { + v.doReset() + return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit) +} + +func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) { + + nv, ok := v.nm.Get(key) + if !ok { + return nil, storage.ErrorNotFound + } + data := make([]byte, nv.Size) + if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil { + return nil, fmt.Errorf("read %s.dat [%d,%d): %v", + v.fileName, nv.Offset.ToActualOffset(), nv.Offset.ToActualOffset()+int64(nv.Size), readErr) + } else { + if readSize != int(nv.Size) { + return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size) + } + } + + return data, nil +} + +func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error { + + offset := v.fileSize + + written, err := v.DataBackend.WriteAt(data, offset) + if err != nil { + return err + } else if written != len(data) { + return fmt.Errorf("partial written %d, expected %d", written, len(data)) + } + + v.fileSize += int64(written) + extraSize := written % types.NeedlePaddingSize + if extraSize != 0 { + v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written)) + v.fileSize += int64(types.NeedlePaddingSize - extraSize) + } + + if err := v.nm.Put(key, types.ToOffset(offset), types.Size(len(data))); err != nil { + return err + } + + return nil +} diff --git a/weed/util/chunk_cache/chunk_cache_on_disk_test.go b/weed/util/chunk_cache/chunk_cache_on_disk_test.go new file mode 100644 index 000000000..f8325276e --- /dev/null +++ b/weed/util/chunk_cache/chunk_cache_on_disk_test.go @@ -0,0 +1,98 @@ +package chunk_cache + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "testing" +) + +func TestOnDisk(t *testing.T) { + + tmpDir, _ := ioutil.TempDir("", "c") + defer os.RemoveAll(tmpDir) + + totalDiskSizeInKB := int64(32) + + cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024) + + writeCount := 5 + type test_data struct { + data []byte + fileId string + size uint64 + } + testData := make([]*test_data, writeCount) + for i := 0; i < writeCount; i++ { + buff := make([]byte, 1024) + rand.Read(buff) + testData[i] = &test_data{ + data: buff, + fileId: fmt.Sprintf("1,%daabbccdd", i+1), + size: uint64(len(buff)), + } + cache.SetChunk(testData[i].fileId, testData[i].data) + + // read back right after write + data := cache.GetChunk(testData[i].fileId, testData[i].size) + if bytes.Compare(data, testData[i].data) != 0 { + t.Errorf("failed to write to and read from cache: %d", i) + } + } + + for i := 0; i < 2; i++ { + data := cache.GetChunk(testData[i].fileId, testData[i].size) + if bytes.Compare(data, testData[i].data) == 0 { + t.Errorf("old cache should have been purged: %d", i) + } + } + + for i := 2; i < writeCount; i++ { + data := cache.GetChunk(testData[i].fileId, testData[i].size) + if bytes.Compare(data, testData[i].data) != 0 { + t.Errorf("failed to write to and read from cache: %d", i) + } + } + + cache.Shutdown() + + cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024) + + for i := 0; i < 2; i++ { + data := cache.GetChunk(testData[i].fileId, testData[i].size) + if bytes.Compare(data, testData[i].data) == 0 { + t.Errorf("old cache should have been purged: %d", i) + } + } + + for i := 2; i < writeCount; i++ { + if i == 4 { + // FIXME this failed many times on build machines + /* + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_0.dat + I0928 
06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_1.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 4096 bytes disk space for /tmp/c578652251/c1_3_0.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 4096 bytes disk space for /tmp/c578652251/c1_3_1.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 4096 bytes disk space for /tmp/c578652251/c1_3_2.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 8192 bytes disk space for /tmp/c578652251/c2_2_0.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 8192 bytes disk space for /tmp/c578652251/c2_2_1.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_0.dat + I0928 06:04:12 10979 volume_create_linux.go:19] Preallocated 2048 bytes disk space for /tmp/c578652251/c0_2_1.dat + --- FAIL: TestOnDisk (0.19s) + chunk_cache_on_disk_test.go:73: failed to write to and read from cache: 4 + FAIL + FAIL github.com/chrislusf/seaweedfs/weed/util/chunk_cache 0.199s + */ + continue + } + data := cache.GetChunk(testData[i].fileId, testData[i].size) + if bytes.Compare(data, testData[i].data) != 0 { + t.Errorf("failed to write to and read from cache: %d", i) + } + } + + cache.Shutdown() + +} diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go new file mode 100644 index 000000000..eebd89798 --- /dev/null +++ b/weed/util/chunk_cache/on_disk_cache_layer.go @@ -0,0 +1,91 @@ +package chunk_cache + +import ( + "fmt" + "path" + "sort" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +type OnDiskCacheLayer struct { + diskCaches []*ChunkCacheVolume +} + +func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount int) *OnDiskCacheLayer { + + volumeCount, volumeSize := int(diskSize/(30000*1024*1024)), int64(30000*1024*1024) + if volumeCount < segmentCount { + volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount) + } + + c := &OnDiskCacheLayer{} + for i := 0; i < volumeCount; i++ { + fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i)) + diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize) + if err != nil { + glog.Errorf("failed to add cache %s : %v", fileName, err) + } else { + c.diskCaches = append(c.diskCaches, diskCache) + } + } + + // keep newest cache to the front + sort.Slice(c.diskCaches, func(i, j int) bool { + return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime) + }) + + return c +} + +func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) { + + if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit { + t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset() + if resetErr != nil { + glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName) + return + } + for i := len(c.diskCaches) - 1; i > 0; i-- { + c.diskCaches[i] = c.diskCaches[i-1] + } + c.diskCaches[0] = t + } + + if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil { + glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err) + } + +} + +func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) { + + var err error + + for _, diskCache := range c.diskCaches { + data, err = diskCache.GetNeedle(needleId) + if err == storage.ErrorNotFound { + continue + } + if err != nil { + 
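+			// read error on this tier: log it and fall through to the next (older) cache volume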
glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId) + continue + } + if len(data) != 0 { + return + } + } + + return nil + +} + +func (c *OnDiskCacheLayer) shutdown() { + + for _, diskCache := range c.diskCaches { + diskCache.Shutdown() + } + +} diff --git a/weed/util/cipher.go b/weed/util/cipher.go new file mode 100644 index 000000000..f044c2ca3 --- /dev/null +++ b/weed/util/cipher.go @@ -0,0 +1,60 @@ +package util + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +type CipherKey []byte + +func GenCipherKey() CipherKey { + key := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + glog.Fatalf("random key gen: %v", err) + } + return CipherKey(key) +} + +func Encrypt(plaintext []byte, key CipherKey) ([]byte, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err = io.ReadFull(rand.Reader, nonce); err != nil { + return nil, err + } + + return gcm.Seal(nonce, nonce, plaintext, nil), nil +} + +func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + nonceSize := gcm.NonceSize() + if len(ciphertext) < nonceSize { + return nil, errors.New("ciphertext too short") + } + + nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:] + return gcm.Open(nil, nonce, ciphertext, nil) +} diff --git a/weed/util/cipher_test.go b/weed/util/cipher_test.go new file mode 100644 index 000000000..026c96ea3 --- /dev/null +++ b/weed/util/cipher_test.go @@ -0,0 +1,17 @@ +package util + +import ( + "encoding/base64" + "testing" +) + +func TestSameAsJavaImplementation(t *testing.T) { + str := "QVVhmqg112NMT7F+G/7QPynqSln3xPIhKdFGmTVKZD6IS0noyr2Z5kXFF6fPjZ/7Hq8kRhlmLeeqZUccxyaZHezOdgkjS6d4NTdHf5IjXzk7" + cipherText, _ := base64.StdEncoding.DecodeString(str) + secretKey := []byte("256-bit key for AES 256 GCM encr") + plantext, err := Decrypt(cipherText, CipherKey(secretKey)) + if err != nil { + println(err.Error()) + } + println(string(plantext)) +} diff --git a/weed/util/compression.go b/weed/util/compression.go index c6c9423e2..9d52810cb 100644 --- a/weed/util/compression.go +++ b/weed/util/compression.go @@ -4,54 +4,111 @@ import ( "bytes" "compress/flate" "compress/gzip" + "fmt" "io/ioutil" "strings" "github.com/chrislusf/seaweedfs/weed/glog" - "golang.org/x/tools/godoc/util" + // "github.com/klauspost/compress/zstd" ) +var ( + UnsupportedCompression = fmt.Errorf("unsupported compression") +) + +func MaybeGzipData(input []byte) []byte { + if IsGzippedContent(input) { + return input + } + gzipped, err := GzipData(input) + if err != nil { + return input + } + if len(gzipped)*10 > len(input)*9 { + return input + } + return gzipped +} + +func MaybeDecompressData(input []byte) []byte { + uncompressed, err := DecompressData(input) + if err != nil { + if err != UnsupportedCompression { + glog.Errorf("decompressed data: %v", err) + } + return input + } + return uncompressed +} + func GzipData(input []byte) ([]byte, error) { buf := new(bytes.Buffer) w, _ := gzip.NewWriterLevel(buf, flate.BestSpeed) if _, err := w.Write(input); err != nil { - glog.V(2).Infoln("error compressing data:", err) + glog.V(2).Infof("error gzip data: %v", err) return nil, err } if err := w.Close(); err != 
diff --git a/weed/util/compression.go b/weed/util/compression.go
index c6c9423e2..9d52810cb 100644
--- a/weed/util/compression.go
+++ b/weed/util/compression.go
@@ -4,54 +4,111 @@ import (
 	"bytes"
 	"compress/flate"
 	"compress/gzip"
+	"fmt"
 	"io/ioutil"
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"golang.org/x/tools/godoc/util"
+	// "github.com/klauspost/compress/zstd"
 )
 
+var (
+	UnsupportedCompression = fmt.Errorf("unsupported compression")
+)
+
+func MaybeGzipData(input []byte) []byte {
+	if IsGzippedContent(input) {
+		return input
+	}
+	gzipped, err := GzipData(input)
+	if err != nil {
+		return input
+	}
+	if len(gzipped)*10 > len(input)*9 {
+		return input
+	}
+	return gzipped
+}
+
+func MaybeDecompressData(input []byte) []byte {
+	uncompressed, err := DecompressData(input)
+	if err != nil {
+		if err != UnsupportedCompression {
+			glog.Errorf("decompressed data: %v", err)
+		}
+		return input
+	}
+	return uncompressed
+}
+
 func GzipData(input []byte) ([]byte, error) {
 	buf := new(bytes.Buffer)
 	w, _ := gzip.NewWriterLevel(buf, flate.BestSpeed)
 	if _, err := w.Write(input); err != nil {
-		glog.V(2).Infoln("error compressing data:", err)
+		glog.V(2).Infof("error gzip data: %v", err)
 		return nil, err
 	}
 	if err := w.Close(); err != nil {
-		glog.V(2).Infoln("error closing compressed data:", err)
+		glog.V(2).Infof("error closing gzipped data: %v", err)
 		return nil, err
 	}
 	return buf.Bytes(), nil
 }
 
-func UnGzipData(input []byte) ([]byte, error) {
+
+func DecompressData(input []byte) ([]byte, error) {
+	if IsGzippedContent(input) {
+		return ungzipData(input)
+	}
+	/*
+		if IsZstdContent(input) {
+			return unzstdData(input)
+		}
+	*/
+	return input, UnsupportedCompression
+}
+
+func ungzipData(input []byte) ([]byte, error) {
 	buf := bytes.NewBuffer(input)
 	r, _ := gzip.NewReader(buf)
 	defer r.Close()
 	output, err := ioutil.ReadAll(r)
 	if err != nil {
-		glog.V(2).Infoln("error uncompressing data:", err)
+		glog.V(2).Infof("error ungzip data: %v", err)
 	}
 	return output, err
 }
 
+func IsGzippedContent(data []byte) bool {
+	if len(data) < 2 {
+		return false
+	}
+	return data[0] == 31 && data[1] == 139
+}
+
 /*
-* Default more not to gzip since gzip can be done on client side.
- */func IsGzippable(ext, mtype string, data []byte) bool {
+var zstdEncoder, _ = zstd.NewWriter(nil)
 
-	shouldBeZipped, iAmSure := IsGzippableFileType(ext, mtype)
-	if iAmSure {
-		return shouldBeZipped
-	}
+func ZstdData(input []byte) ([]byte, error) {
+	return zstdEncoder.EncodeAll(input, nil), nil
+}
 
-	isMostlyText := util.IsText(data)
+var decoder, _ = zstd.NewReader(nil)
 
-	return isMostlyText
+func unzstdData(input []byte) ([]byte, error) {
+	return decoder.DecodeAll(input, nil)
 }
 
+func IsZstdContent(data []byte) bool {
+	if len(data) < 4 {
+		return false
+	}
+	return data[3] == 0xFD && data[2] == 0x2F && data[1] == 0xB5 && data[0] == 0x28
+}
+*/
+
 /*
-* Default more not to gzip since gzip can be done on client side.
- */func IsGzippableFileType(ext, mtype string) (shouldBeZipped, iAmSure bool) {
+* Default is not to compress, since compression can be done on the client side.
+ */func IsCompressableFileType(ext, mtype string) (shouldBeCompressed, iAmSure bool) {
 
 	// text
 	if strings.HasPrefix(mtype, "text/") {
@@ -60,7 +117,7 @@ func UnGzipData(input []byte) ([]byte, error) {
 
 	// images
 	switch ext {
-	case ".svg", ".bmp":
+	case ".svg", ".bmp", ".wav":
 		return true, true
 	}
 	if strings.HasPrefix(mtype, "image/") {
@@ -69,7 +126,7 @@ func UnGzipData(input []byte) ([]byte, error) {
 
 	// by file name extension
 	switch ext {
-	case ".zip", ".rar", ".gz", ".bz2", ".xz":
+	case ".zip", ".rar", ".gz", ".bz2", ".xz", ".zst":
 		return false, true
 	case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json":
 		return true, true
@@ -81,6 +138,9 @@ func UnGzipData(input []byte) ([]byte, error) {
 
 	// by mime type
 	if strings.HasPrefix(mtype, "application/") {
+		if strings.HasSuffix(mtype, "zstd") {
+			return false, true
+		}
 		if strings.HasSuffix(mtype, "xml") {
 			return true, true
 		}
@@ -89,5 +149,12 @@ func UnGzipData(input []byte) ([]byte, error) {
 		}
 	}
 
+	if strings.HasPrefix(mtype, "audio/") {
+		switch strings.TrimPrefix(mtype, "audio/") {
+		case "wave", "wav", "x-wav", "x-pn-wav":
+			return true, true
+		}
+	}
+
 	return false, false
 }
diff --git a/weed/util/compression_test.go b/weed/util/compression_test.go
new file mode 100644
index 000000000..b515e8988
--- /dev/null
+++ b/weed/util/compression_test.go
@@ -0,0 +1,21 @@
+package util
+
+import (
+	"testing"
+
+	"golang.org/x/tools/godoc/util"
+)
+
+func TestIsGzippable(t *testing.T) {
+	buf := make([]byte, 1024)
+
+	isText := util.IsText(buf)
+
+	if isText {
+		t.Error("buf with zeros is not text")
+	}
+
+	compressed, _ := GzipData(buf)
+
+	t.Logf("compressed size %d\n", len(compressed))
+}
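The Maybe* helpers above are deliberately conservative: MaybeGzipData keeps the gzipped form only when it saves at least ten percent (len(gzipped)*10 > len(input)*9 rejects anything smaller), and MaybeDecompressData passes unrecognized input through unchanged instead of failing. A short sketch of that behavior (a hypothetical example program using only the util package from this diff):

package main

import (
	"bytes"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// repetitive data compresses far past the 10% threshold, so the gzipped form is kept
	data := bytes.Repeat([]byte("seaweedfs "), 1000)
	stored := util.MaybeGzipData(data)
	fmt.Printf("gzipped=%v, %d -> %d bytes\n", util.IsGzippedContent(stored), len(data), len(stored))

	// MaybeDecompressData is the inverse: gzipped input is restored,
	// anything else comes back as-is (UnsupportedCompression is swallowed)
	restored := util.MaybeDecompressData(stored)
	fmt.Println(bytes.Equal(restored, data)) // true
}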
"current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+ - " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+ - "\nOr use this command to generate the default toml file\n"+ + "\n\nPlease use this command to generate the default %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", configFileName, configFileName, configFileName) } else { @@ -40,3 +46,56 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { return true } + +type ViperProxy struct { + *viper.Viper + sync.Mutex +} + +var ( + vp = &ViperProxy{} +) + +func (vp *ViperProxy) SetDefault(key string, value interface{}) { + vp.Lock() + defer vp.Unlock() + vp.Viper.SetDefault(key, value) +} + +func (vp *ViperProxy) GetString(key string) string { + vp.Lock() + defer vp.Unlock() + return vp.Viper.GetString(key) +} + +func (vp *ViperProxy) GetBool(key string) bool { + vp.Lock() + defer vp.Unlock() + return vp.Viper.GetBool(key) +} + +func (vp *ViperProxy) GetInt(key string) int { + vp.Lock() + defer vp.Unlock() + return vp.Viper.GetInt(key) +} + +func (vp *ViperProxy) GetStringSlice(key string) []string { + vp.Lock() + defer vp.Unlock() + return vp.Viper.GetStringSlice(key) +} + +func GetViper() *ViperProxy { + vp.Lock() + defer vp.Unlock() + + if vp.Viper == nil { + vp.Viper = viper.GetViper() + vp.AutomaticEnv() + vp.SetEnvPrefix("weed") + vp.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + } + + return vp +} diff --git a/weed/util/constants.go b/weed/util/constants.go index 0916850ef..c595f0c53 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,10 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 47) + VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 41) + COMMIT = "" ) + +func Version() string { + return VERSION + " " + COMMIT +} diff --git a/weed/util/file_util.go b/weed/util/file_util.go index bef9f7cd6..f83f80265 100644 --- a/weed/util/file_util.go +++ b/weed/util/file_util.go @@ -3,6 +3,9 @@ package util import ( "errors" "os" + "os/user" + "path/filepath" + "strings" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -49,6 +52,10 @@ func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Ti exists = false return } + if err != nil { + glog.Errorf("check %s: %v", filename, err) + return + } if fi.Mode()&0400 != 0 { canRead = true } @@ -59,3 +66,24 @@ func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Ti fileSize = fi.Size() return } + +func ResolvePath(path string) string { + + if !strings.Contains(path, "~") { + return path + } + + usr, _ := user.Current() + dir := usr.HomeDir + + if path == "~" { + // In case of "~", which won't be caught by the "else if" + path = dir + } else if strings.HasPrefix(path, "~/") { + // Use strings.HasPrefix so we don't match paths like + // "/something/~/something/" + path = filepath.Join(dir, path[2:]) + } + + return path +} diff --git a/weed/util/fla9/fla9.go b/weed/util/fla9/fla9.go new file mode 100644 index 000000000..eb5700e8c --- /dev/null +++ b/weed/util/fla9/fla9.go @@ -0,0 +1,1149 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package flag implements command-line flag parsing. + + Usage: + + Define flags using flag.String(), Bool(), Int(), etc. + + This declares an integer flag, -flagname, stored in the pointer ip, with type *int. 
+ import "flag" + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } + Or you can create custom flags that satisfy the Value interface (with + pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. + + After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. + + Flags may then be used directly. If you're using the flags themselves, + they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + + After parsing, the arguments following the flags are available as the + slice flag.Args() or individually as flag.Arg(i). + The arguments are indexed from 0 through flag.NArg()-1. + + Command line flag syntax: + -flag + -flag=x + -flag x // non-boolean flags only + One or two minus signs may be used; they are equivalent. + The last form is not permitted for boolean flags because the + meaning of the command + cmd -x * + will change if there is a file called 0, false, etc. You must + use the -flag=false form to turn off a boolean flag. + + Flag parsing stops just before the first non-flag argument + ("-" is a non-flag argument) or after the terminator "--". + + Integer flags accept 1234, 0664, 0x1234 and may be negative. + Boolean flags may be: + 1, 0, t, f, T, F, true, false, TRUE, FALSE, True, False + Duration flags accept any input valid for time.ParseDuration. + + The default set of command-line flags is controlled by + top-level functions. The FlagSet type allows one to define + independent sets of flags, such as to implement subcommands + in a command-line interface. The methods of FlagSet are + analogous to the top-level functions for the command-line + flag set. +*/ +package fla9 + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// ErrHelp is the error returned if the -help or -h flag is invoked +// but no such flag is defined. 
+var ErrHelp = errors.New("flag: help requested") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. 
+// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +// +// Set is called once, in command line order, for each flag present. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how FlagSet.Parse behaves if the parse fails. +type ErrorHandling int + +// These constants cause FlagSet.Parse to behave as described if the parse fails. +const ( + ContinueOnError ErrorHandling = iota // Return a descriptive error. + ExitOnError // Call os.Exit(2). + PanicOnError // Call panic with a descriptive error. +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + envPrefix string // prefix to all env variable names + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. +type Flag struct { + Name string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for _, f := range flags { + list[i] = f.Name + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { f.output = output } + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { CommandLine.VisitAll(fn) } + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { CommandLine.Visit(fn) } + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. 
+func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { return CommandLine.formal[name] } + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { return CommandLine.Set(name, value) } + +// isZeroValue guesses whether the string represents the zero +// value for a flag. It is not accurate but in practice works OK. +func isZeroValue(flag *Flag, value string) bool { + // Build a zero value of the flag's Value type, and see if the + // result of calling its String method equals the value passed in. + // This works unless the Value type is itself an interface type. + typ := reflect.TypeOf(flag.Value) + var z reflect.Value + if typ.Kind() == reflect.Ptr { + z = reflect.New(typ.Elem()) + } else { + z = reflect.Zero(typ) + } + if value == z.Interface().(Value).String() { + return true + } + + switch value { + case "false", "", "0": + return true + } + return false +} + +// UnquoteUsage extracts a back-quoted name from the usage +// string for a flag and returns it and the un-quoted usage. +// Given "a `name` to show" it returns ("name", "a name to show"). +// If there are no back quotes, the name is an educated guess of the +// type of the flag's value, or the empty string if the flag is boolean. +func UnquoteUsage(flag *Flag) (name string, usage string) { + // Look for a back-quoted name, but avoid the strings package. + usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. + } + } + // No explicit name, so use type if we can find one. + name = "value" + switch flag.Value.(type) { + case boolFlag: + name = "" + case *durationValue: + name = "duration" + case *float64Value: + name = "float" + case *intValue, *int64Value: + name = "int" + case *stringValue: + name = "string" + case *uintValue, *uint64Value: + name = "uint" + } + return +} + +// PrintDefaults prints to standard error the default values of all +// defined command-line flags in the set. See the documentation for +// the global function PrintDefaults for more information. +func (f *FlagSet) PrintDefaults() { + f.VisitAll(func(flag *Flag) { + s := fmt.Sprintf(" -%s", flag.Name) // Two spaces before -; see next two comments. + name, usage := UnquoteUsage(flag) + if len(name) > 0 { + s += " " + name + } + // Boolean flags of one ASCII letter are so common we + // treat them specially, putting their usage on the same line. + if len(s) <= 4 { // space, space, '-', 'x'. + s += "\t" + } else { + // Four spaces before the tab triggers good alignment + // for both 4- and 8-space tab stops. 
+ s += "\n \t" + } + s += usage + if !isZeroValue(flag, flag.DefValue) { + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + s += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + s += fmt.Sprintf(" (default %v)", flag.DefValue) + } + } + fmt.Fprint(f.out(), s, "\n") + }) +} + +// PrintDefaults prints, to standard error unless configured otherwise, +// a usage message showing the default settings of all defined +// command-line flags. +// For an integer valued flag x, the default output has the form +// -x int +// usage-message-for-x (default 7) +// The usage message will appear on a separate line for anything but +// a bool flag with a one-byte name. For bool flags, the type is +// omitted and if the flag name is one byte the usage message appears +// on the same line. The parenthetical default is omitted if the +// default is the zero value for the type. The listed type, here int, +// can be changed by placing a back-quoted name in the flag's usage +// string; the first such item in the message is taken to be a parameter +// name to show in the message and the back quotes are stripped from +// the message when displayed. For instance, given +// flag.String("I", "", "search `directory` for include files") +// the output will be +// -I directory +// search directory for include files. +func PrintDefaults() { CommandLine.PrintDefaults() } + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + if f.name == "" { + fmt.Fprintf(f.out(), "Usage:\n") + } else { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + } + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// It is called when an error occurs while parsing flags. +// The function is a variable that may be changed to point to a custom function. +// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. Arg returns an empty string if the +// requested element does not exist. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. Arg returns an empty string if the +// requested element does not exist. +func Arg(i int) string { return CommandLine.Arg(i) } + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. 
+func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.Var(newBoolValue(value, p), name, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), name, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, name, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return CommandLine.Bool(name, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.Var(newIntValue(value, p), name, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), name, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVar(p, name, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.Int(name, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.Var(newInt64Value(value, p), name, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), name, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, name, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. 
+func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64(name, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.Var(newUintValue(value, p), name, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), name, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, name, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { return CommandLine.Uint(name, value, usage) } + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.Var(newUint64Value(value, p), name, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), name, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, name, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(name, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name, value, usage string) { + f.Var(newStringValue(value, p), name, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name, value, usage string) { + CommandLine.Var(newStringValue(value, p), name, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name, value, usage string) *string { + p := new(string) + f.StringVar(p, name, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. 
+// The return value is the address of a string variable that stores the value of the flag. +func String(name, value, usage string) *string { + return CommandLine.String(name, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.Var(newFloat64Value(value, p), name, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), name, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, name, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64(name, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), name, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), name, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, name, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(name, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. 
For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{name, usage, value, value.String()} + _, alreadythere := f.formal[name] + if alreadythere { + var msg string + if f.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) + } + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name, usage string) { + CommandLine.Var(value, name, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set if one is specified, +// or the appropriate default usage function otherwise. +func (f *FlagSet) usage() { + if f.Usage == nil { + if f == CommandLine { + Usage() + } else { + defaultUsage(f) + } + } else { + f.Usage() + } +} + +// parseOne parses one flag. It reports whether a flag was seen. +func (f *FlagSet) parseOne() (bool, error) { + if len(f.args) == 0 { + return false, nil + } + s := f.args[0] + if len(s) < 2 || s[0] != '-' { + return false, nil + } + numMinuses := 1 + if s[1] == '-' { + numMinuses++ + if len(s) == 2 { // "--" terminates the flags + f.args = f.args[1:] + return false, nil + } + } + name := s[numMinuses:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + return false, f.failf("bad flag syntax: %s", s) + } + + // ignore go test flags + if strings.HasPrefix(name, "test.") { + return false, nil + } + + // it's a flag. does it have an argument? + f.args = f.args[1:] + hasValue := false + value := "" + for i := 1; i < len(name); i++ { // equals cannot be first + if name[i] == '=' { + value = name[i+1:] + hasValue = true + name = name[0:i] + break + } + } + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, ErrHelp + } + return false, f.failf("flag provided but not defined: -%s", name) + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fv.Set(value); err != nil { + return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + if err := fv.Set("true"); err != nil { + return false, f.failf("invalid boolean flag %s: %v", name, err) + } + } + } else { + // It must have a value, which might be the next argument. 
+ if !hasValue && len(f.args) > 0 { + // value is the next arg + hasValue = true + value, f.args = f.args[0], f.args[1:] + } + if !hasValue { + return false, f.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, f.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return true, nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help or -h were set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + if _, ok := f.formal[DefaultConfigFlagName]; !ok { + f.String(DefaultConfigFlagName, "", "a file of command line options, each line in optionName=optionValue format") + } + + f.parsed = true + f.args = arguments + for { + seen, err := f.parseOne() + if seen { + continue + } + if err == nil { + break + } + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + + // Parse environment variables + if err := f.ParseEnv(os.Environ()); err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + return err + } + + // Parse configuration from file + var cFile string + if cf := f.formal[DefaultConfigFlagName]; cf != nil { + cFile = cf.Value.String() + } + if cf := f.actual[DefaultConfigFlagName]; cf != nil { + cFile = cf.Value.String() + } + + if cFile == "" { + cFile = f.findConfigArgInUnresolved() + } + + if cFile != "" { + if err := f.ParseFile(cFile, true); err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + return err + } + } + + return nil +} + +func (f *FlagSet) findConfigArgInUnresolved() string { + configArg := "-" + DefaultConfigFlagName + for i := 0; i < len(f.args); i++ { + if strings.HasPrefix(f.args[i], configArg) { + if f.args[i] == configArg && i+1 < len(f.args) { + return f.args[i+1] + } + + if strings.HasPrefix(f.args[i], configArg+"=") { + return f.args[i][len(configArg)+1:] + break + } + } + } + return "" +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { return f.parsed } + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Parsed reports whether the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and so on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + envPrefix: EnvPrefix, + } + return f +} + +// Init sets the name and error handling property for a flag set. 
+// By default, the zero FlagSet uses an empty name, EnvPrefix, and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.envPrefix = EnvPrefix + f.errorHandling = errorHandling +} + +// EnvPrefix defines a string that will be implicitly prefixed to a +// flag name before looking it up in the environment variables. +var EnvPrefix = "WEED" + +// ParseEnv parses flags from environment variables. +// Flags already set will be ignored. +func (f *FlagSet) ParseEnv(environ []string) error { + env := make(map[string]string) + for _, s := range environ { + if i := strings.Index(s, "="); i >= 1 { + env[s[0:i]] = s[i+1:] + } + } + + for _, flag := range f.formal { + name := flag.Name + if _, set := f.actual[name]; set { + continue + } + + flag, alreadyThere := f.formal[name] + if !alreadyThere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return ErrHelp + } + + return f.failf("environment variable provided but not defined: %s", name) + } + + envKey := strings.ToUpper(flag.Name) + if f.envPrefix != "" { + envKey = f.envPrefix + "_" + envKey + } + envKey = strings.Replace(envKey, "-", "_", -1) + + value, isSet := env[envKey] + if !isSet { + continue + } + + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() && value == "" { + // special case: doesn't need an arg + // flag without value is regarded a bool + value = ("true") + } + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid value %q for environment variable %s: %v", value, name, err) + } + + // update f.actual + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + } + return nil +} + +// NewFlagSetWithEnvPrefix returns a new empty flag set with the specified name, +// environment variable prefix, and error handling property. +func NewFlagSetWithEnvPrefix(name string, prefix string, errorHandling ErrorHandling) *FlagSet { + f := NewFlagSet(name, errorHandling) + f.envPrefix = prefix + return f +} + +// DefaultConfigFlagName defines the flag name of the optional config file +// path. Used to lookup and parse the config file when a default is set and +// available on disk. +var DefaultConfigFlagName = "options" + +// ParseFile parses flags from the file in path. +// Same format as commandline arguments, newlines and lines beginning with a +// "#" character are ignored. Flags already set will be ignored. +func (f *FlagSet) ParseFile(path string, ignoreUndefinedConf bool) error { + fp, err := os.Open(path) // Extract arguments from file + if err != nil { + return err + } + defer fp.Close() + + scanner := bufio.NewScanner(fp) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Ignore empty lines or comments + if line == "" || line[:1] == "#" || line[:1] == "//" || line[:1] == "--" { + continue + } + + // Match `key=value` and `key value` + name, value := line, "" + for i, v := range line { + if v == '=' || v == ' ' || v == ':' { + name, value = strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) + break + } + } + + name = strings.TrimPrefix(name, "-") + + // Ignore flag when already set; arguments have precedence over file + if f.actual[name] != nil { + continue + } + + flag, alreadyThere := f.formal[name] + if !alreadyThere { + if ignoreUndefinedConf { + continue + } + + if name == "help" || name == "h" { // special case for nice help message. 
+ f.usage() + return ErrHelp + } + return f.failf("configuration variable provided but not defined: %s", name) + } + + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() && value == "" { + // special case: doesn't need an arg + value = "true" + } + + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid value %q for configuration variable %s: %v", value, name, err) + } + + // update f.actual + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + } + + return scanner.Err() +} diff --git a/weed/filer2/fullpath.go b/weed/util/fullpath.go index 191e51cf3..f2119707e 100644 --- a/weed/filer2/fullpath.go +++ b/weed/util/fullpath.go @@ -1,4 +1,4 @@ -package filer2 +package util import ( "path/filepath" @@ -13,6 +13,7 @@ func NewFullPath(dir, name string) FullPath { func (fp FullPath) DirAndName() (string, string) { dir, name := filepath.Split(string(fp)) + name = strings.ToValidUTF8(name, "?") if dir == "/" { return dir, name } @@ -24,6 +25,7 @@ func (fp FullPath) DirAndName() (string, string) { func (fp FullPath) Name() string { _, name := filepath.Split(string(fp)) + name = strings.ToValidUTF8(name, "?") return name } @@ -34,3 +36,23 @@ func (fp FullPath) Child(name string) FullPath { } return FullPath(dir + "/" + name) } + +func (fp FullPath) AsInode() uint64 { + return uint64(HashStringToLong(string(fp))) +} + +// split, but skipping the root +func (fp FullPath) Split() []string { + if fp == "" || fp == "/" { + return []string{} + } + return strings.Split(string(fp)[1:], "/") +} + +func Join(names ...string) string { + return filepath.ToSlash(filepath.Join(names...)) +} + +func JoinPath(names ...string) FullPath { + return FullPath(Join(names...)) +} diff --git a/weed/util/pprof.go b/weed/util/grace/pprof.go index a2621ceee..14686bfc8 100644 --- a/weed/util/pprof.go +++ b/weed/util/grace/pprof.go @@ -1,4 +1,4 @@ -package util +package grace import ( "os" diff --git a/weed/util/signal_handling.go b/weed/util/grace/signal_handling.go index 99447e8be..7cca46764 100644 --- a/weed/util/signal_handling.go +++ b/weed/util/grace/signal_handling.go @@ -1,6 +1,6 @@ // +build !plan9 -package util +package grace import ( "os" diff --git a/weed/util/signal_handling_notsupported.go b/weed/util/grace/signal_handling_notsupported.go index c389cfb7e..5335915a1 100644 --- a/weed/util/signal_handling_notsupported.go +++ b/weed/util/grace/signal_handling_notsupported.go @@ -1,6 +1,6 @@ // +build plan9 -package util +package grace func OnInterrupt(fn func()) { } diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go deleted file mode 100644 index 31497ad35..000000000 --- a/weed/util/grpc_client_server.go +++ /dev/null @@ -1,120 +0,0 @@ -package util - -import ( - "context" - "fmt" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" -) - -var ( - // cache grpc connections - grpcClients = make(map[string]*grpc.ClientConn) - grpcClientsLock sync.Mutex -) - -func init() { - http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024 -} - -func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { - var options []grpc.ServerOption - options = append(options, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: 10 * time.Second, // wait time before ping if no activity - Timeout: 20 * time.Second, // ping timeout - }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 60 * time.Second, // min time a client should wait before sending a 
ping - })) - for _, opt := range opts { - if opt != nil { - options = append(options, opt) - } - } - return grpc.NewServer(options...) -} - -func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - // opts = append(opts, grpc.WithBlock()) - // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) - var options []grpc.DialOption - options = append(options, - // grpc.WithInsecure(), - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 30 * time.Second, // client ping server if no activity for this long - Timeout: 20 * time.Second, - })) - for _, opt := range opts { - if opt != nil { - options = append(options, opt) - } - } - return grpc.DialContext(ctx, address, options...) -} - -func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { - - grpcClientsLock.Lock() - - existingConnection, found := grpcClients[address] - if found { - grpcClientsLock.Unlock() - return fn(existingConnection) - } - - grpcConnection, err := GrpcDial(ctx, address, opts...) - if err != nil { - grpcClientsLock.Unlock() - return fmt.Errorf("fail to dial %s: %v", address, err) - } - - grpcClients[address] = grpcConnection - grpcClientsLock.Unlock() - - err = fn(grpcConnection) - if err != nil { - grpcClientsLock.Lock() - delete(grpcClients, address) - grpcClientsLock.Unlock() - grpcConnection.Close() - } - - return err -} - -func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { - colonIndex := strings.LastIndex(server, ":") - if colonIndex < 0 { - return "", fmt.Errorf("server should have hostname:port format: %v", server) - } - - port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64) - if parseErr != nil { - return "", fmt.Errorf("server port parse error: %v", parseErr) - } - - grpcPort := int(port) + 10000 - - return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil -} - -func ServerToGrpcAddress(server string) (serverGrpcAddress string) { - hostnameAndPort := strings.Split(server, ":") - if len(hostnameAndPort) != 2 { - return fmt.Sprintf("unexpected server address: %s", server) - } - - port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) - if parseErr != nil { - return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1]) - } - - grpcPort := int(port) + 10000 - - return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort) -} diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 667d0b4be..1630760b1 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -1,7 +1,6 @@ package util import ( - "bytes" "compress/gzip" "encoding/json" "errors" @@ -11,6 +10,8 @@ import ( "net/http" "net/url" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( @@ -20,6 +21,7 @@ var ( func init() { Transport = &http.Transport{ + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, } client = &http.Client{ @@ -27,22 +29,6 @@ func init() { } } -func PostBytes(url string, body []byte) ([]byte, error) { - r, err := client.Post(url, "", bytes.NewReader(body)) - if err != nil { - return nil, fmt.Errorf("Post to %s: %v", url, err) - } - defer r.Body.Close() - if r.StatusCode >= 400 { - return nil, fmt.Errorf("%s: %s", url, r.Status) - } - b, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, fmt.Errorf("Read response body: %v", err) - } - return b, nil -} - func Post(url string, values url.Values) ([]byte, error) { r, err := client.PostForm(url, values) if err != nil { @@ 
-65,20 +51,35 @@ func Post(url string, values url.Values) ([]byte, error) { // github.com/chrislusf/seaweedfs/unmaintained/repeated_vacuum/repeated_vacuum.go // may need increasing http.Client.Timeout -func Get(url string) ([]byte, error) { - r, err := client.Get(url) +func Get(url string) ([]byte, bool, error) { + + request, err := http.NewRequest("GET", url, nil) + request.Header.Add("Accept-Encoding", "gzip") + + response, err := client.Do(request) if err != nil { - return nil, err + return nil, true, err } - defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) - if r.StatusCode >= 400 { - return nil, fmt.Errorf("%s: %s", url, r.Status) + defer response.Body.Close() + + var reader io.ReadCloser + switch response.Header.Get("Content-Encoding") { + case "gzip": + reader, err = gzip.NewReader(response.Body) + defer reader.Close() + default: + reader = response.Body + } + + b, err := ioutil.ReadAll(reader) + if response.StatusCode >= 400 { + retryable := response.StatusCode >= 500 + return nil, retryable, fmt.Errorf("%s: %s", url, response.Status) } if err != nil { - return nil, err + return nil, false, err } - return b, nil + return b, false, nil } func Head(url string) (http.Header, error) { @@ -86,7 +87,7 @@ func Head(url string) (http.Header, error) { if err != nil { return nil, err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode >= 400 { return nil, fmt.Errorf("%s: %s", url, r.Status) } @@ -115,7 +116,7 @@ func Delete(url string, jwt string) error { return nil } m := make(map[string]interface{}) - if e := json.Unmarshal(body, m); e == nil { + if e := json.Unmarshal(body, &m); e == nil { if s, ok := m["error"].(string); ok { return errors.New(s) } @@ -123,12 +124,33 @@ func Delete(url string, jwt string) error { return errors.New(string(body)) } +func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err error) { + req, err := http.NewRequest("DELETE", url, nil) + if jwt != "" { + req.Header.Set("Authorization", "BEARER "+string(jwt)) + } + if err != nil { + return + } + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return + } + httpStatus = resp.StatusCode + return +} + func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error { r, err := client.PostForm(url, values) if err != nil { return err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } @@ -151,14 +173,14 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e if err != nil { return err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } return readFn(r.Body) } -func DownloadFile(fileUrl string) (filename string, header http.Header, rc io.ReadCloser, e error) { +func DownloadFile(fileUrl string) (filename string, header http.Header, resp *http.Response, e error) { response, err := client.Get(fileUrl) if err != nil { return "", nil, nil, err @@ -172,7 +194,7 @@ func DownloadFile(fileUrl string) (filename string, header http.Header, rc io.Re filename = strings.Trim(filename, "\"") } } - rc = response.Body + resp = response return } @@ -187,14 +209,22 @@ func NormalizeUrl(url string) string { return "http://" + url } -func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (int64, error) { +func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed 
bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) { + + if cipherKey != nil { + var n int + _, err := readEncryptedUrl(fileUrl, cipherKey, isContentCompressed, isFullChunk, offset, size, func(data []byte) { + n = copy(buf, data) + }) + return int64(n), err + } req, err := http.NewRequest("GET", fileUrl, nil) if err != nil { return 0, err } - if isReadRange { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + if !isFullChunk { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } else { req.Header.Set("Accept-Encoding", "gzip") } @@ -210,7 +240,8 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo } var reader io.ReadCloser - switch r.Header.Get("Content-Encoding") { + contentEncoding := r.Header.Get("Content-Encoding") + switch contentEncoding { case "gzip": reader, err = gzip.NewReader(r.Body) defer reader.Close() @@ -242,44 +273,131 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo // drains the response body to avoid memory leak data, _ := ioutil.ReadAll(reader) if len(data) != 0 { - err = fmt.Errorf("buffer size is too small. remains %d", len(data)) + glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) } return n, err } -func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (int64, error) { +func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (retryable bool, err error) { + + if cipherKey != nil { + return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn) + } req, err := http.NewRequest("GET", fileUrl, nil) if err != nil { - return 0, err + return false, err + } + + if isFullChunk { + req.Header.Add("Accept-Encoding", "gzip") + } else { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) r, err := client.Do(req) if err != nil { - return 0, err + return true, err } - defer r.Body.Close() + defer CloseResponse(r) if r.StatusCode >= 400 { - return 0, fmt.Errorf("%s: %s", fileUrl, r.Status) + retryable = r.StatusCode >= 500 + return retryable, fmt.Errorf("%s: %s", fileUrl, r.Status) + } + + var reader io.ReadCloser + contentEncoding := r.Header.Get("Content-Encoding") + switch contentEncoding { + case "gzip": + reader, err = gzip.NewReader(r.Body) + defer reader.Close() + default: + reader = r.Body } var ( m int - n int64 ) buf := make([]byte, 64*1024) for { - m, err = r.Body.Read(buf) + m, err = reader.Read(buf) fn(buf[:m]) - n += int64(m) if err == io.EOF { - return n, nil + return false, nil } if err != nil { - return n, err + return false, err } } } + +func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (bool, error) { + encryptedData, retryable, err := Get(fileUrl) + if err != nil { + return retryable, fmt.Errorf("fetch %s: %v", fileUrl, err) + } + decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey)) + if err != nil { + return false, fmt.Errorf("decrypt %s: %v", fileUrl, err) + } + if isContentCompressed { + decryptedData, err = DecompressData(decryptedData) + if err != nil { + glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err) + } + } + if len(decryptedData) < int(offset)+size { + return false, fmt.Errorf("read decrypted %s size 
%d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size) + } + if isFullChunk { + fn(decryptedData) + } else { + fn(decryptedData[int(offset) : int(offset)+size]) + } + return false, nil +} + +func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) { + + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return nil, err + } + if rangeHeader != "" { + req.Header.Add("Range", rangeHeader) + } else { + req.Header.Add("Accept-Encoding", "gzip") + } + + r, err := client.Do(req) + if err != nil { + return nil, err + } + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", fileUrl, r.Status) + } + + var reader io.ReadCloser + contentEncoding := r.Header.Get("Content-Encoding") + switch contentEncoding { + case "gzip": + reader, err = gzip.NewReader(r.Body) + defer reader.Close() + default: + reader = r.Body + } + + return reader, nil +} + +func CloseResponse(resp *http.Response) { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() +} + +func CloseRequest(req *http.Request) { + io.Copy(ioutil.Discard, req.Body) + req.Body.Close() +} diff --git a/weed/util/inits.go b/weed/util/inits.go new file mode 100644 index 000000000..378878012 --- /dev/null +++ b/weed/util/inits.go @@ -0,0 +1,52 @@ +package util + +import ( + "fmt" + "sort" +) + +// HumanReadableIntsMax joins a serials of inits into a smart one like 1-3 5 ... for human readable. +func HumanReadableIntsMax(max int, ids ...int) string { + if len(ids) <= max { + return HumanReadableInts(ids...) + } + + return HumanReadableInts(ids[:max]...) + " ..." +} + +// HumanReadableInts joins a serials of inits into a smart one like 1-3 5 7-10 for human readable. +func HumanReadableInts(ids ...int) string { + sort.Ints(ids) + + s := "" + start := 0 + last := 0 + + for i, v := range ids { + if i == 0 { + start = v + last = v + s = fmt.Sprintf("%d", v) + continue + } + + if last+1 == v { + last = v + continue + } + + if last > start { + s += fmt.Sprintf("-%d", last) + } + + s += fmt.Sprintf(" %d", v) + start = v + last = v + } + + if last != start { + s += fmt.Sprintf("-%d", last) + } + + return s +} diff --git a/weed/util/inits_test.go b/weed/util/inits_test.go new file mode 100644 index 000000000..f2c9b701f --- /dev/null +++ b/weed/util/inits_test.go @@ -0,0 +1,19 @@ +package util + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHumanReadableIntsMax(t *testing.T) { + assert.Equal(t, "1-2 ...", HumanReadableIntsMax(2, 1, 2, 3)) + assert.Equal(t, "1 3 ...", HumanReadableIntsMax(2, 1, 3, 5)) +} + +func TestHumanReadableInts(t *testing.T) { + assert.Equal(t, "1-3", HumanReadableInts(1, 2, 3)) + assert.Equal(t, "1 3", HumanReadableInts(1, 3)) + assert.Equal(t, "1 3 5", HumanReadableInts(5, 1, 3)) + assert.Equal(t, "1-3 5", HumanReadableInts(1, 2, 3, 5)) + assert.Equal(t, "1-3 5 7-9", HumanReadableInts(7, 9, 8, 1, 2, 3, 5)) +} diff --git a/weed/util/limiter.go b/weed/util/limiter.go new file mode 100644 index 000000000..2debaaa85 --- /dev/null +++ b/weed/util/limiter.go @@ -0,0 +1,114 @@ +package util + +import ( + "math/rand" + "reflect" + "sync" + "sync/atomic" +) + +// initial version comes from https://github.com/korovkin/limiter/blob/master/limiter.go + +// LimitedConcurrentExecutor object +type LimitedConcurrentExecutor struct { + limit int + tokenChan chan int +} + +func NewLimitedConcurrentExecutor(limit int) *LimitedConcurrentExecutor { + + // allocate a limiter instance + c := &LimitedConcurrentExecutor{ + limit: limit, + tokenChan: make(chan int, 
limit), + } + + // allocate the tokenChan: + for i := 0; i < c.limit; i++ { + c.tokenChan <- i + } + + return c +} + +// Execute adds a function to the execution queue. +// if num of go routines allocated by this instance is < limit +// launch a new go routine to execute job +// else wait until a go routine becomes available +func (c *LimitedConcurrentExecutor) Execute(job func()) { + token := <-c.tokenChan + go func() { + defer func() { + c.tokenChan <- token + }() + // run the job + job() + }() +} + +// a different implementation, but somehow more "conservative" +type OperationRequest func() + +type LimitedOutOfOrderProcessor struct { + processorSlots uint32 + processors []chan OperationRequest + processorLimit int32 + processorLimitCond *sync.Cond + currentProcessor int32 +} + +func NewLimitedOutOfOrderProcessor(limit int32) (c *LimitedOutOfOrderProcessor) { + + processorSlots := uint32(32) + c = &LimitedOutOfOrderProcessor{ + processorSlots: processorSlots, + processors: make([]chan OperationRequest, processorSlots), + processorLimit: limit, + processorLimitCond: sync.NewCond(new(sync.Mutex)), + } + + for i := 0; i < int(processorSlots); i++ { + c.processors[i] = make(chan OperationRequest) + } + + cases := make([]reflect.SelectCase, processorSlots) + for i, ch := range c.processors { + cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)} + } + + go func() { + for { + _, value, ok := reflect.Select(cases) + if !ok { + continue + } + + request := value.Interface().(OperationRequest) + + if c.processorLimit > 0 { + c.processorLimitCond.L.Lock() + for atomic.LoadInt32(&c.currentProcessor) > c.processorLimit { + c.processorLimitCond.Wait() + } + atomic.AddInt32(&c.currentProcessor, 1) + c.processorLimitCond.L.Unlock() + } + + go func() { + if c.processorLimit > 0 { + defer atomic.AddInt32(&c.currentProcessor, -1) + defer c.processorLimitCond.Signal() + } + request() + }() + + } + }() + + return c +} + +func (c *LimitedOutOfOrderProcessor) Execute(request OperationRequest) { + index := rand.Uint32() % c.processorSlots + c.processors[index] <- request +} diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go new file mode 100644 index 000000000..f84c674ff --- /dev/null +++ b/weed/util/log_buffer/log_buffer.go @@ -0,0 +1,292 @@ +package log_buffer + +import ( + "bytes" + "sync" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const BufferSize = 4 * 1024 * 1024 +const PreviousBufferCount = 3 + +type dataToFlush struct { + startTime time.Time + stopTime time.Time + data *bytes.Buffer +} + +type LogBuffer struct { + prevBuffers *SealedBuffers + buf []byte + idx []int + pos int + startTime time.Time + stopTime time.Time + lastFlushTime time.Time + sizeBuf []byte + flushInterval time.Duration + flushFn func(startTime, stopTime time.Time, buf []byte) + notifyFn func() + isStopping bool + flushChan chan *dataToFlush + lastTsNs int64 + sync.RWMutex +} + +func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte), notifyFn func()) *LogBuffer { + lb := &LogBuffer{ + prevBuffers: newSealedBuffers(PreviousBufferCount), + buf: make([]byte, BufferSize), + sizeBuf: make([]byte, 4), + flushInterval: flushInterval, + flushFn: flushFn, + notifyFn: notifyFn, + flushChan: make(chan *dataToFlush, 256), + } + go lb.loopFlush() + go lb.loopInterval() + return lb +} + 
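+// Editor's note (not part of the original commit): AddToBuffer below frames
+// every entry as a 4-byte size header (written with util.Uint32toBytes)
+// followed by the marshaled filer_pb.LogEntry, so a minimal reader sketch
+// over a flushed buffer looks like:
+//
+//	for pos := 0; pos+4 < len(buf); {
+//		size := int(util.BytesToUint32(buf[pos : pos+4]))
+//		logEntry := &filer_pb.LogEntry{}
+//		if err := proto.Unmarshal(buf[pos+4:pos+4+size], logEntry); err != nil {
+//			break // stop at a corrupted tail
+//		}
+//		pos += 4 + size
+//	}
+//
+// This is the same framing that readTs below and LoopProcessLogData in
+// log_read.go (added later in this diff) walk over.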
+func (m *LogBuffer) AddToBuffer(partitionKey, data []byte, eventTsNs int64) { + + m.Lock() + defer func() { + m.Unlock() + if m.notifyFn != nil { + m.notifyFn() + } + }() + + // need to put the timestamp inside the lock + var ts time.Time + if eventTsNs == 0 { + ts = time.Now() + eventTsNs = ts.UnixNano() + } else { + ts = time.Unix(0, eventTsNs) + } + if m.lastTsNs >= eventTsNs { + // this is unlikely to happen, but just in case + eventTsNs = m.lastTsNs + 1 + ts = time.Unix(0, eventTsNs) + } + m.lastTsNs = eventTsNs + logEntry := &filer_pb.LogEntry{ + TsNs: eventTsNs, + PartitionKeyHash: util.HashToInt32(partitionKey), + Data: data, + } + + logEntryData, _ := proto.Marshal(logEntry) + + size := len(logEntryData) + + if m.pos == 0 { + m.startTime = ts + } + + if m.startTime.Add(m.flushInterval).Before(ts) || len(m.buf)-m.pos < size+4 { + m.flushChan <- m.copyToFlush() + m.startTime = ts + if len(m.buf) < size+4 { + m.buf = make([]byte, 2*size+4) + } + } + m.stopTime = ts + + m.idx = append(m.idx, m.pos) + util.Uint32toBytes(m.sizeBuf, uint32(size)) + copy(m.buf[m.pos:m.pos+4], m.sizeBuf) + copy(m.buf[m.pos+4:m.pos+4+size], logEntryData) + m.pos += size + 4 + + // fmt.Printf("entry size %d total %d count %d, buffer:%p\n", size, m.pos, len(m.idx), m) + +} + +func (m *LogBuffer) Shutdown() { + m.Lock() + defer m.Unlock() + + if m.isStopping { + return + } + m.isStopping = true + toFlush := m.copyToFlush() + m.flushChan <- toFlush + close(m.flushChan) +} + +func (m *LogBuffer) loopFlush() { + for d := range m.flushChan { + if d != nil { + // fmt.Printf("flush [%v, %v] size %d\n", d.startTime, d.stopTime, len(d.data.Bytes())) + m.flushFn(d.startTime, d.stopTime, d.data.Bytes()) + d.releaseMemory() + m.lastFlushTime = d.stopTime + } + } +} + +func (m *LogBuffer) loopInterval() { + for !m.isStopping { + time.Sleep(m.flushInterval) + m.Lock() + if m.isStopping { + m.Unlock() + return + } + // println("loop interval") + toFlush := m.copyToFlush() + m.flushChan <- toFlush + m.Unlock() + } +} + +func (m *LogBuffer) copyToFlush() *dataToFlush { + + if m.pos > 0 { + // fmt.Printf("flush buffer %d pos %d empty space %d\n", len(m.buf), m.pos, len(m.buf)-m.pos) + var d *dataToFlush + if m.flushFn != nil { + d = &dataToFlush{ + startTime: m.startTime, + stopTime: m.stopTime, + data: copiedBytes(m.buf[:m.pos]), + } + } + // fmt.Printf("flusing [0,%d) with %d entries\n", m.pos, len(m.idx)) + m.buf = m.prevBuffers.SealBuffer(m.startTime, m.stopTime, m.buf, m.pos) + m.pos = 0 + m.idx = m.idx[:0] + return d + } + return nil +} + +func (d *dataToFlush) releaseMemory() { + d.data.Reset() + bufferPool.Put(d.data) +} + +func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Buffer, err error) { + m.RLock() + defer m.RUnlock() + + if !m.lastFlushTime.IsZero() && m.lastFlushTime.After(lastReadTime) { + return nil, ResumeFromDiskError + } + + /* + fmt.Printf("read buffer %p: %v last stop time: [%v,%v], pos %d, entries:%d, prevBufs:%d\n", m, lastReadTime, m.startTime, m.stopTime, m.pos, len(m.idx), len(m.prevBuffers.buffers)) + for i, prevBuf := range m.prevBuffers.buffers { + fmt.Printf(" prev %d : %s\n", i, prevBuf.String()) + } + */ + + if lastReadTime.Equal(m.stopTime) { + return nil, nil + } + if lastReadTime.After(m.stopTime) { + // glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadTime, m.stopTime) + return nil, nil + } + if lastReadTime.Before(m.startTime) { + // println("checking ", lastReadTime.UnixNano()) + for i, buf := range m.prevBuffers.buffers { + if 
buf.startTime.After(lastReadTime) { + if i == 0 { + // println("return the earliest in memory", buf.startTime.UnixNano()) + return copiedBytes(buf.buf[:buf.size]), nil + } + // println("return the", i, "th in memory", buf.startTime.UnixNano()) + return copiedBytes(buf.buf[:buf.size]), nil + } + if !buf.startTime.After(lastReadTime) && buf.stopTime.After(lastReadTime) { + pos := buf.locateByTs(lastReadTime) + // fmt.Printf("locate buffer[%d] pos %d\n", i, pos) + return copiedBytes(buf.buf[pos:buf.size]), nil + } + } + // println("return the current buf", lastReadTime.UnixNano()) + return copiedBytes(m.buf[:m.pos]), nil + } + + lastTs := lastReadTime.UnixNano() + l, h := 0, len(m.idx)-1 + + /* + for i, pos := range m.idx { + logEntry, ts := readTs(m.buf, pos) + event := &filer_pb.SubscribeMetadataResponse{} + proto.Unmarshal(logEntry.Data, event) + entry := event.EventNotification.OldEntry + if entry == nil { + entry = event.EventNotification.NewEntry + } + fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name) + } + fmt.Printf("l=%d, h=%d\n", l, h) + */ + + for l <= h { + mid := (l + h) / 2 + pos := m.idx[mid] + _, t := readTs(m.buf, pos) + if t <= lastTs { + l = mid + 1 + } else if lastTs < t { + var prevT int64 + if mid > 0 { + _, prevT = readTs(m.buf, m.idx[mid-1]) + } + if prevT <= lastTs { + // fmt.Printf("found l=%d, m-1=%d(ts=%d), m=%d(ts=%d), h=%d [%d, %d) \n", l, mid-1, prevT, mid, t, h, pos, m.pos) + return copiedBytes(m.buf[pos:m.pos]), nil + } + h = mid + } + // fmt.Printf("l=%d, h=%d\n", l, h) + } + + // FIXME: this could be that the buffer has been flushed already + return nil, nil + +} +func (m *LogBuffer) ReleaseMemory(b *bytes.Buffer) { + bufferPool.Put(b) +} + +var bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func copiedBytes(buf []byte) (copied *bytes.Buffer) { + copied = bufferPool.Get().(*bytes.Buffer) + copied.Reset() + copied.Write(buf) + return +} + +func readTs(buf []byte, pos int) (size int, ts int64) { + + size = int(util.BytesToUint32(buf[pos : pos+4])) + entryData := buf[pos+4 : pos+4+size] + logEntry := &filer_pb.LogEntry{} + + err := proto.Unmarshal(entryData, logEntry) + if err != nil { + glog.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err) + } + return size, logEntry.TsNs + +} diff --git a/weed/util/log_buffer/log_buffer_test.go b/weed/util/log_buffer/log_buffer_test.go new file mode 100644 index 000000000..3d77afb18 --- /dev/null +++ b/weed/util/log_buffer/log_buffer_test.go @@ -0,0 +1,42 @@ +package log_buffer + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func TestNewLogBufferFirstBuffer(t *testing.T) { + lb := NewLogBuffer(time.Minute, func(startTime, stopTime time.Time, buf []byte) { + + }, func() { + + }) + + startTime := time.Now() + + messageSize := 1024 + messageCount := 5000 + var buf = make([]byte, messageSize) + for i := 0; i < messageCount; i++ { + rand.Read(buf) + lb.AddToBuffer(nil, buf, 0) + } + + receivedmessageCount := 0 + lb.LoopProcessLogData(startTime, func() bool { + // stop if no more messages + return false + }, func(logEntry *filer_pb.LogEntry) error { + receivedmessageCount++ + return nil + }) + + if receivedmessageCount != messageCount { + fmt.Printf("sent %d received %d\n", messageCount, receivedmessageCount) + } + +} diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go new file mode 100644 index 000000000..d6917abfe --- 
/dev/null +++ b/weed/util/log_buffer/log_read.go @@ -0,0 +1,89 @@ +package log_buffer + +import ( + "bytes" + "fmt" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + ResumeError = fmt.Errorf("resume") + ResumeFromDiskError = fmt.Errorf("resumeFromDisk") +) + +func (logBuffer *LogBuffer) LoopProcessLogData( + startTreadTime time.Time, + waitForDataFn func() bool, + eachLogDataFn func(logEntry *filer_pb.LogEntry) error) (lastReadTime time.Time, err error) { + // loop through all messages + var bytesBuf *bytes.Buffer + lastReadTime = startTreadTime + defer func() { + if bytesBuf != nil { + logBuffer.ReleaseMemory(bytesBuf) + } + }() + + for { + + if bytesBuf != nil { + logBuffer.ReleaseMemory(bytesBuf) + } + bytesBuf, err = logBuffer.ReadFromBuffer(lastReadTime) + if err == ResumeFromDiskError { + return lastReadTime, ResumeFromDiskError + } + // fmt.Printf("ReadFromBuffer by %v\n", lastReadTime) + if bytesBuf == nil { + if waitForDataFn() { + continue + } else { + return + } + } + + buf := bytesBuf.Bytes() + // fmt.Printf("ReadFromBuffer by %v size %d\n", lastReadTime, len(buf)) + + batchSize := 0 + var startReadTime time.Time + + for pos := 0; pos+4 < len(buf); { + + size := util.BytesToUint32(buf[pos : pos+4]) + if pos+4+int(size) > len(buf) { + err = ResumeError + glog.Errorf("LoopProcessLogData: read buffer %v read %d [%d,%d) from [0,%d)", lastReadTime, batchSize, pos, pos+int(size)+4, len(buf)) + return + } + entryData := buf[pos+4 : pos+4+int(size)] + + logEntry := &filer_pb.LogEntry{} + if err = proto.Unmarshal(entryData, logEntry); err != nil { + glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err) + pos += 4 + int(size) + continue + } + lastReadTime = time.Unix(0, logEntry.TsNs) + if startReadTime.IsZero() { + startReadTime = lastReadTime + } + + if err = eachLogDataFn(logEntry); err != nil { + return + } + + pos += 4 + int(size) + batchSize++ + } + + // fmt.Printf("sent message ts[%d,%d] size %d\n", startReadTime.UnixNano(), lastReadTime.UnixNano(), batchSize) + } + +} diff --git a/weed/util/log_buffer/sealed_buffer.go b/weed/util/log_buffer/sealed_buffer.go new file mode 100644 index 000000000..d133cf8d3 --- /dev/null +++ b/weed/util/log_buffer/sealed_buffer.go @@ -0,0 +1,62 @@ +package log_buffer + +import ( + "fmt" + "time" +) + +type MemBuffer struct { + buf []byte + size int + startTime time.Time + stopTime time.Time +} + +type SealedBuffers struct { + buffers []*MemBuffer +} + +func newSealedBuffers(size int) *SealedBuffers { + sbs := &SealedBuffers{} + + sbs.buffers = make([]*MemBuffer, size) + for i := 0; i < size; i++ { + sbs.buffers[i] = &MemBuffer{ + buf: make([]byte, BufferSize), + } + } + + return sbs +} + +func (sbs *SealedBuffers) SealBuffer(startTime, stopTime time.Time, buf []byte, pos int) (newBuf []byte) { + oldMemBuffer := sbs.buffers[0] + size := len(sbs.buffers) + for i := 0; i < size-1; i++ { + sbs.buffers[i].buf = sbs.buffers[i+1].buf + sbs.buffers[i].size = sbs.buffers[i+1].size + sbs.buffers[i].startTime = sbs.buffers[i+1].startTime + sbs.buffers[i].stopTime = sbs.buffers[i+1].stopTime + } + sbs.buffers[size-1].buf = buf + sbs.buffers[size-1].size = pos + sbs.buffers[size-1].startTime = startTime + sbs.buffers[size-1].stopTime = stopTime + return oldMemBuffer.buf +} + +func (mb *MemBuffer) locateByTs(lastReadTime time.Time) (pos int) { + lastReadTs := lastReadTime.UnixNano() + for pos < 
len(mb.buf) { + size, t := readTs(mb.buf, pos) + if t > lastReadTs { + return + } + pos += size + 4 + } + return len(mb.buf) +} + +func (mb *MemBuffer) String() string { + return fmt.Sprintf("[%v,%v] bytes:%d", mb.startTime, mb.stopTime, mb.size) +} diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go index b8068e67f..e8075c297 100644 --- a/weed/util/net_timeout.go +++ b/weed/util/net_timeout.go @@ -35,6 +35,7 @@ type Conn struct { net.Conn ReadTimeout time.Duration WriteTimeout time.Duration + isClosed bool } func (c *Conn) Read(b []byte) (count int, e error) { @@ -53,7 +54,8 @@ func (c *Conn) Read(b []byte) (count int, e error) { func (c *Conn) Write(b []byte) (count int, e error) { if c.WriteTimeout != 0 { - err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout)) + // minimum 4KB/s + err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout * time.Duration(len(b)/40000+1))) if err != nil { return 0, err } @@ -68,7 +70,10 @@ func (c *Conn) Write(b []byte) (count int, e error) { func (c *Conn) Close() error { err := c.Conn.Close() if err == nil { - stats.ConnectionClose() + if !c.isClosed { + stats.ConnectionClose() + c.isClosed = true + } } return err } diff --git a/weed/util/network.go b/weed/util/network.go new file mode 100644 index 000000000..55a123667 --- /dev/null +++ b/weed/util/network.go @@ -0,0 +1,35 @@ +package util + +import ( + "net" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +func DetectedHostAddress() string { + netInterfaces, err := net.Interfaces() + if err != nil { + glog.V(0).Infof("failed to detect net interfaces: %v", err) + return "" + } + + for _, netInterface := range netInterfaces { + if (netInterface.Flags & net.FlagUp) == 0 { + continue + } + addrs, err := netInterface.Addrs() + if err != nil { + glog.V(0).Infof("get interface addresses: %v", err) + } + + for _, a := range addrs { + if ipNet, ok := a.(*net.IPNet); ok && !ipNet.IP.IsLoopback() { + if ipNet.IP.To4() != nil { + return ipNet.IP.String() + } + } + } + } + + return "localhost" +} diff --git a/weed/util/parse.go b/weed/util/parse.go index 6593d43b6..0955db682 100644 --- a/weed/util/parse.go +++ b/weed/util/parse.go @@ -1,6 +1,7 @@ package util import ( + "fmt" "net/url" "strconv" "strings" @@ -45,3 +46,18 @@ func ParseFilerUrl(entryPath string) (filerServer string, filerPort int64, path path = u.Path return } + +func ParseHostPort(hostPort string) (filerServer string, filerPort int64, err error) { + parts := strings.Split(hostPort, ":") + if len(parts) != 2 { + err = fmt.Errorf("failed to parse %s\n", hostPort) + return + } + + filerPort, err = strconv.ParseInt(parts[1], 10, 64) + if err == nil { + filerServer = parts[0] + } + + return +} diff --git a/weed/util/queue_unbounded.go b/weed/util/queue_unbounded.go new file mode 100644 index 000000000..496b9f844 --- /dev/null +++ b/weed/util/queue_unbounded.go @@ -0,0 +1,45 @@ +package util + +import "sync" + +type UnboundedQueue struct { + outbound []string + outboundLock sync.RWMutex + inbound []string + inboundLock sync.RWMutex +} + +func NewUnboundedQueue() *UnboundedQueue { + q := &UnboundedQueue{} + return q +} + +func (q *UnboundedQueue) EnQueue(items ...string) { + q.inboundLock.Lock() + defer q.inboundLock.Unlock() + + q.inbound = append(q.inbound, items...) 
+ +} + +func (q *UnboundedQueue) Consume(fn func([]string)) { + q.outboundLock.Lock() + defer q.outboundLock.Unlock() + + if len(q.outbound) == 0 { + q.inboundLock.Lock() + inbountLen := len(q.inbound) + if inbountLen > 0 { + t := q.outbound + q.outbound = q.inbound + q.inbound = t + } + q.inboundLock.Unlock() + } + + if len(q.outbound) > 0 { + fn(q.outbound) + q.outbound = q.outbound[:0] + } + +} diff --git a/weed/util/queue_unbounded_test.go b/weed/util/queue_unbounded_test.go new file mode 100644 index 000000000..2d02032cb --- /dev/null +++ b/weed/util/queue_unbounded_test.go @@ -0,0 +1,25 @@ +package util + +import "testing" + +func TestEnqueueAndConsume(t *testing.T) { + + q := NewUnboundedQueue() + + q.EnQueue("1", "2", "3") + + f := func(items []string) { + for _, t := range items { + println(t) + } + println("-----------------------") + } + q.Consume(f) + + q.Consume(f) + + q.EnQueue("4", "5") + q.EnQueue("6", "7") + q.Consume(f) + +} diff --git a/weed/util/retry.go b/weed/util/retry.go new file mode 100644 index 000000000..7b0f2d3c3 --- /dev/null +++ b/weed/util/retry.go @@ -0,0 +1,43 @@ +package util + +import ( + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +var RetryWaitTime = 6 * time.Second + +func Retry(name string, job func() error) (err error) { + waitTime := time.Second + hasErr := false + for waitTime < RetryWaitTime { + err = job() + if err == nil { + if hasErr { + glog.V(0).Infof("retry %s successfully", name) + } + break + } + if strings.Contains(err.Error(), "transport") { + hasErr = true + glog.V(0).Infof("retry %s: err: %v", name, err) + time.Sleep(waitTime) + waitTime += waitTime / 2 + } else { + break + } + } + return err +} + +// return the first non empty string +func Nvl(values ...string) string { + for _, s := range values { + if s != "" { + return s + } + } + return "" +} diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go new file mode 100644 index 000000000..5b5fa2704 --- /dev/null +++ b/weed/wdclient/exclusive_locks/exclusive_locker.go @@ -0,0 +1,123 @@ +package exclusive_locks + +import ( + "context" + "sync/atomic" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +const ( + RenewInteval = 4 * time.Second + SafeRenewInteval = 3 * time.Second + InitLockInteval = 1 * time.Second + AdminLockName = "admin" +) + +type ExclusiveLocker struct { + masterClient *wdclient.MasterClient + token int64 + lockTsNs int64 + isLocking bool +} + +func NewExclusiveLocker(masterClient *wdclient.MasterClient) *ExclusiveLocker { + return &ExclusiveLocker{ + masterClient: masterClient, + } +} +func (l *ExclusiveLocker) IsLocking() bool { + return l.isLocking +} + +func (l *ExclusiveLocker) GetToken() (token int64, lockTsNs int64) { + for time.Unix(0, atomic.LoadInt64(&l.lockTsNs)).Add(SafeRenewInteval).Before(time.Now()) { + // wait until now is within the safe lock period, no immediate renewal to change the token + time.Sleep(100 * time.Millisecond) + } + return atomic.LoadInt64(&l.token), atomic.LoadInt64(&l.lockTsNs) +} + +func (l *ExclusiveLocker) RequestLock(clientName string) { + if l.isLocking { + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // retry to get the lease + for { + if err := l.masterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err := client.LeaseAdminToken(ctx, 
&master_pb.LeaseAdminTokenRequest{ + PreviousToken: atomic.LoadInt64(&l.token), + PreviousLockTime: atomic.LoadInt64(&l.lockTsNs), + LockName: AdminLockName, + ClientName: clientName, + }) + if err == nil { + atomic.StoreInt64(&l.token, resp.Token) + atomic.StoreInt64(&l.lockTsNs, resp.LockTsNs) + } + return err + }); err != nil { + println("lock:", err.Error()) + time.Sleep(InitLockInteval) + } else { + break + } + } + + l.isLocking = true + + // start a goroutine to renew the lease + go func() { + ctx2, cancel2 := context.WithCancel(context.Background()) + defer cancel2() + + for l.isLocking { + if err := l.masterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err := client.LeaseAdminToken(ctx2, &master_pb.LeaseAdminTokenRequest{ + PreviousToken: atomic.LoadInt64(&l.token), + PreviousLockTime: atomic.LoadInt64(&l.lockTsNs), + LockName: AdminLockName, + ClientName: clientName, + }) + if err == nil { + atomic.StoreInt64(&l.token, resp.Token) + atomic.StoreInt64(&l.lockTsNs, resp.LockTsNs) + // println("ts", l.lockTsNs, "token", l.token) + } + return err + }); err != nil { + glog.Errorf("failed to renew lock: %v", err) + return + } else { + time.Sleep(RenewInteval) + } + + } + }() + +} + +func (l *ExclusiveLocker) ReleaseLock() { + l.isLocking = false + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + l.masterClient.WithClient(func(client master_pb.SeaweedClient) error { + client.ReleaseAdminToken(ctx, &master_pb.ReleaseAdminTokenRequest{ + PreviousToken: atomic.LoadInt64(&l.token), + PreviousLockTime: atomic.LoadInt64(&l.lockTsNs), + LockName: AdminLockName, + }) + return nil + }) + atomic.StoreInt64(&l.token, 0) + atomic.StoreInt64(&l.lockTsNs, 0) +} diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 111514f5e..e39b9dfdf 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -2,19 +2,21 @@ package wdclient import ( "context" - "fmt" "math/rand" "time" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) type MasterClient struct { - ctx context.Context - name string + clientType string + clientHost string + grpcPort uint32 currentMaster string masters []string grpcDialOption grpc.DialOption @@ -22,13 +24,14 @@ type MasterClient struct { vidMap } -func NewMasterClient(ctx context.Context, grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient { +func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost string, clientGrpcPort uint32, clientDataCenter string, masters []string) *MasterClient { return &MasterClient{ - ctx: ctx, - name: clientName, + clientType: clientType, + clientHost: clientHost, + grpcPort: clientGrpcPort, masters: masters, grpcDialOption: grpcDialOption, - vidMap: newVidMap(), + vidMap: newVidMap(clientDataCenter), } } @@ -43,13 +46,39 @@ func (mc *MasterClient) WaitUntilConnected() { } func (mc *MasterClient) KeepConnectedToMaster() { - glog.V(1).Infof("%s bootstraps with masters %v", mc.name, mc.masters) + glog.V(1).Infof("%s masterClient bootstraps with masters %v", mc.clientType, mc.masters) for { mc.tryAllMasters() time.Sleep(time.Second) } } +func (mc *MasterClient) FindLeaderFromOtherPeers(myMasterAddress string) (leader string) { + for _, master := 
range mc.masters { + if master == myMasterAddress { + continue + } + if grpcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond) + defer cancel() + resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return err + } + leader = resp.Leader + return nil + }); grpcErr != nil { + glog.V(0).Infof("connect to %s: %v", master, grpcErr) + } + if leader != "" { + glog.V(0).Infof("existing leader is %s", leader) + return + } + } + glog.V(0).Infof("No existing leader found!") + return +} + func (mc *MasterClient) tryAllMasters() { nextHintedLeader := "" for _, master := range mc.masters { @@ -60,32 +89,35 @@ func (mc *MasterClient) tryAllMasters() { } mc.currentMaster = "" - mc.vidMap = newVidMap() + mc.vidMap = newVidMap("") } } func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) { - glog.V(1).Infof("%s Connecting to master %v", mc.name, master) - gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + glog.V(1).Infof("%s masterClient Connecting to master %v", mc.clientType, master) + gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() stream, err := client.KeepConnected(ctx) if err != nil { - glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.name, master, err) + glog.V(1).Infof("%s masterClient failed to keep connected to %s: %v", mc.clientType, master, err) return err } - if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name}); err != nil { - glog.V(0).Infof("%s failed to send to %s: %v", mc.name, master, err) + if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, GrpcPort: mc.grpcPort}); err != nil { + glog.V(0).Infof("%s masterClient failed to send to %s: %v", mc.clientType, master, err) return err } - glog.V(1).Infof("%s Connected to %v", mc.name, master) + glog.V(1).Infof("%s masterClient Connected to %v", mc.clientType, master) mc.currentMaster = master for { volumeLocation, err := stream.Recv() if err != nil { - glog.V(0).Infof("%s failed to receive from %s: %v", mc.name, master, err) + glog.V(0).Infof("%s masterClient failed to receive from %s: %v", mc.clientType, master, err) return err } @@ -98,42 +130,34 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri // process new volume location loc := Location{ - Url: volumeLocation.Url, - PublicUrl: volumeLocation.PublicUrl, + Url: volumeLocation.Url, + PublicUrl: volumeLocation.PublicUrl, + DataCenter: volumeLocation.DataCenter, } for _, newVid := range volumeLocation.NewVids { - glog.V(1).Infof("%s: %s adds volume %d", mc.name, loc.Url, newVid) + glog.V(1).Infof("%s: %s masterClient adds volume %d", mc.clientType, loc.Url, newVid) mc.addLocation(newVid, loc) } for _, deletedVid := range volumeLocation.DeletedVids { - glog.V(1).Infof("%s: %s removes volume %d", mc.name, loc.Url, deletedVid) + glog.V(1).Infof("%s: %s masterClient removes volume %d", mc.clientType, loc.Url, deletedVid) mc.deleteLocation(deletedVid, loc) } } }) if gprcErr != nil { - glog.V(0).Infof("%s failed to connect with master %v: %v", mc.name, master, gprcErr) + glog.V(1).Infof("%s masterClient failed to connect with master %v: %v", mc.clientType, master, gprcErr) } return 
} -func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error { - - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) - if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) - } - - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { - client := master_pb.NewSeaweedClient(grpcConnection) - return fn(ctx, client) - }, masterGrpcAddress, grpcDialOption) - -} - -func (mc *MasterClient) WithClient(ctx context.Context, fn func(client master_pb.SeaweedClient) error) error { - return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { - return fn(client) +func (mc *MasterClient) WithClient(fn func(client master_pb.SeaweedClient) error) error { + return util.Retry("master grpc", func() error { + for mc.currentMaster == "" { + time.Sleep(3 * time.Second) + } + return pb.WithMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) }) } diff --git a/weed/wdclient/net2/base_connection_pool.go b/weed/wdclient/net2/base_connection_pool.go new file mode 100644 index 000000000..5cc037d0f --- /dev/null +++ b/weed/wdclient/net2/base_connection_pool.go @@ -0,0 +1,159 @@ +package net2 + +import ( + "net" + "strings" + "time" + + rp "github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool" +) + +const defaultDialTimeout = 1 * time.Second + +func defaultDialFunc(network string, address string) (net.Conn, error) { + return net.DialTimeout(network, address, defaultDialTimeout) +} + +func parseResourceLocation(resourceLocation string) ( + network string, + address string) { + + idx := strings.Index(resourceLocation, " ") + if idx >= 0 { + return resourceLocation[:idx], resourceLocation[idx+1:] + } + + return "", resourceLocation +} + +// A thin wrapper around the underlying resource pool. +type connectionPoolImpl struct { + options ConnectionOptions + + pool rp.ResourcePool +} + +// This returns a connection pool where all connections are connected +// to the same (network, address) +func newBaseConnectionPool( + options ConnectionOptions, + createPool func(rp.Options) rp.ResourcePool) ConnectionPool { + + dial := options.Dial + if dial == nil { + dial = defaultDialFunc + } + + openFunc := func(loc string) (interface{}, error) { + network, address := parseResourceLocation(loc) + return dial(network, address) + } + + closeFunc := func(handle interface{}) error { + return handle.(net.Conn).Close() + } + + poolOptions := rp.Options{ + MaxActiveHandles: options.MaxActiveConnections, + MaxIdleHandles: options.MaxIdleConnections, + MaxIdleTime: options.MaxIdleTime, + OpenMaxConcurrency: options.DialMaxConcurrency, + Open: openFunc, + Close: closeFunc, + NowFunc: options.NowFunc, + } + + return &connectionPoolImpl{ + options: options, + pool: createPool(poolOptions), + } +} + +// This returns a connection pool where all connections are connected +// to the same (network, address) +func NewSimpleConnectionPool(options ConnectionOptions) ConnectionPool { + return newBaseConnectionPool(options, rp.NewSimpleResourcePool) +} + +// This returns a connection pool that manages multiple (network, address) +// entries. The connections to each (network, address) entry acts +// independently. 
For example ("tcp", "localhost:11211") could act as memcache +// shard 0 and ("tcp", "localhost:11212") could act as memcache shard 1. +func NewMultiConnectionPool(options ConnectionOptions) ConnectionPool { + return newBaseConnectionPool( + options, + func(poolOptions rp.Options) rp.ResourcePool { + return rp.NewMultiResourcePool(poolOptions, nil) + }) +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) NumActive() int32 { + return p.pool.NumActive() +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) ActiveHighWaterMark() int32 { + return p.pool.ActiveHighWaterMark() +} + +// This returns the number of alive idle connections. This method is not part +// of ConnectionPool's API. It is used only for testing. +func (p *connectionPoolImpl) NumIdle() int { + return p.pool.NumIdle() +} + +// BaseConnectionPool can only register a single (network, address) entry. +// Register should be call before any Get calls. +func (p *connectionPoolImpl) Register(network string, address string) error { + return p.pool.Register(network + " " + address) +} + +// BaseConnectionPool has nothing to do on Unregister. +func (p *connectionPoolImpl) Unregister(network string, address string) error { + return nil +} + +func (p *connectionPoolImpl) ListRegistered() []NetworkAddress { + result := make([]NetworkAddress, 0, 1) + for _, location := range p.pool.ListRegistered() { + network, address := parseResourceLocation(location) + + result = append( + result, + NetworkAddress{ + Network: network, + Address: address, + }) + } + return result +} + +// This gets an active connection from the connection pool. Note that network +// and address arguments are ignored (The connections with point to the +// network/address provided by the first Register call). +func (p *connectionPoolImpl) Get( + network string, + address string) (ManagedConn, error) { + + handle, err := p.pool.Get(network + " " + address) + if err != nil { + return nil, err + } + return NewManagedConn(network, address, handle, p, p.options), nil +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) Release(conn ManagedConn) error { + return conn.ReleaseConnection() +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) Discard(conn ManagedConn) error { + return conn.DiscardConnection() +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) EnterLameDuckMode() { + p.pool.EnterLameDuckMode() +} diff --git a/weed/wdclient/net2/connection_pool.go b/weed/wdclient/net2/connection_pool.go new file mode 100644 index 000000000..5b8d4d232 --- /dev/null +++ b/weed/wdclient/net2/connection_pool.go @@ -0,0 +1,97 @@ +package net2 + +import ( + "net" + "time" +) + +type ConnectionOptions struct { + // The maximum number of connections that can be active per host at any + // given time (A non-positive value indicates the number of connections + // is unbounded). + MaxActiveConnections int32 + + // The maximum number of idle connections per host that are kept alive by + // the connection pool. + MaxIdleConnections uint32 + + // The maximum amount of time an idle connection can alive (if specified). + MaxIdleTime *time.Duration + + // This limits the number of concurrent Dial calls (there's no limit when + // DialMaxConcurrency is non-positive). + DialMaxConcurrency int + + // Dial specifies the dial function for creating network connections. + // If Dial is nil, net.DialTimeout is used, with timeout set to 1 second. 
+ Dial func(network string, address string) (net.Conn, error) + + // This specifies the now time function. When the function is non-nil, the + // connection pool will use the specified function instead of time.Now to + // generate the current time. + NowFunc func() time.Time + + // This specifies the timeout for any Read() operation. + // Note that setting this to 0 (i.e. not setting it) will make + // read operations block indefinitely. + ReadTimeout time.Duration + + // This specifies the timeout for any Write() operation. + // Note that setting this to 0 (i.e. not setting it) will make + // write operations block indefinitely. + WriteTimeout time.Duration +} + +func (o ConnectionOptions) getCurrentTime() time.Time { + if o.NowFunc == nil { + return time.Now() + } else { + return o.NowFunc() + } +} + +// A generic interface for managed connection pool. All connection pool +// implementations must be threadsafe. +type ConnectionPool interface { + // This returns the number of active connections that are on loan. + NumActive() int32 + + // This returns the highest number of active connections for the entire + // lifetime of the pool. + ActiveHighWaterMark() int32 + + // This returns the number of idle connections that are in the pool. + NumIdle() int + + // This associates (network, address) to the connection pool; afterwhich, + // the user can get connections to (network, address). + Register(network string, address string) error + + // This dissociate (network, address) from the connection pool; + // afterwhich, the user can no longer get connections to + // (network, address). + Unregister(network string, address string) error + + // This returns the list of registered (network, address) entries. + ListRegistered() []NetworkAddress + + // This gets an active connection from the connection pool. The connection + // will remain active until one of the following is called: + // 1. conn.ReleaseConnection() + // 2. conn.DiscardConnection() + // 3. pool.Release(conn) + // 4. pool.Discard(conn) + Get(network string, address string) (ManagedConn, error) + + // This releases an active connection back to the connection pool. + Release(conn ManagedConn) error + + // This discards an active connection from the connection pool. + Discard(conn ManagedConn) error + + // Enter the connection pool into lame duck mode. The connection pool + // will no longer return connections, and all idle connections are closed + // immediately (including active connections that are released back to the + // pool afterward). + EnterLameDuckMode() +} diff --git a/weed/wdclient/net2/doc.go b/weed/wdclient/net2/doc.go new file mode 100644 index 000000000..fd1c6323d --- /dev/null +++ b/weed/wdclient/net2/doc.go @@ -0,0 +1,6 @@ +// net2 is a collection of functions meant to supplement the capabilities +// provided by the standard "net" package. +package net2 + +// copied from https://github.com/dropbox/godropbox/tree/master/net2 +// removed other dependencies diff --git a/weed/wdclient/net2/ip.go b/weed/wdclient/net2/ip.go new file mode 100644 index 000000000..ff5e3b24e --- /dev/null +++ b/weed/wdclient/net2/ip.go @@ -0,0 +1,177 @@ +package net2 + +import ( + "fmt" + "log" + "net" + "os" + "strings" + "sync" +) + +var myHostname string +var myHostnameOnce sync.Once + +// Like os.Hostname but caches first successful result, making it cheap to call it +// over and over. +// It will also crash whole process if fetching Hostname fails! 
+func MyHostname() string { + myHostnameOnce.Do(func() { + var err error + myHostname, err = os.Hostname() + if err != nil { + log.Fatal(err) + } + }) + return myHostname +} + +var myIp4 *net.IPAddr +var myIp4Once sync.Once + +// Resolves `MyHostname()` to an Ip4 address. Caches first successful result, making it +// cheap to call it over and over. +// It will also crash whole process if resolving the IP fails! +func MyIp4() *net.IPAddr { + myIp4Once.Do(func() { + var err error + myIp4, err = net.ResolveIPAddr("ip4", MyHostname()) + if err != nil { + log.Fatal(err) + } + }) + return myIp4 +} + +var myIp6 *net.IPAddr +var myIp6Once sync.Once + +// Resolves `MyHostname()` to an Ip6 address. Caches first successful result, making it +// cheap to call it over and over. +// It will also crash whole process if resolving the IP fails! +func MyIp6() *net.IPAddr { + myIp6Once.Do(func() { + var err error + myIp6, err = net.ResolveIPAddr("ip6", MyHostname()) + if err != nil { + log.Fatal(err) + } + }) + return myIp6 +} + +// This returns the list of local ip addresses which other hosts can connect +// to (NOTE: Loopback ip is ignored). +// Also resolves Hostname to an address and adds it to the list too, so +// IPs from /etc/hosts can work too. +func GetLocalIPs() ([]*net.IP, error) { + hostname, err := os.Hostname() + if err != nil { + return nil, fmt.Errorf("Failed to lookup hostname: %v", err) + } + // Resolves IP Address from Hostname, this way overrides in /etc/hosts + // can work too for IP resolution. + ipInfo, err := net.ResolveIPAddr("ip4", hostname) + if err != nil { + return nil, fmt.Errorf("Failed to resolve ip: %v", err) + } + ips := []*net.IP{&ipInfo.IP} + + // TODO(zviad): Is rest of the code really necessary? + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, fmt.Errorf("Failed to get interface addresses: %v", err) + } + for _, addr := range addrs { + ipnet, ok := addr.(*net.IPNet) + if !ok { + continue + } + + if ipnet.IP.IsLoopback() { + continue + } + + ips = append(ips, &ipnet.IP) + } + return ips, nil +} + +var localhostIPNets []*net.IPNet + +func init() { + for _, mask := range []string{"127.0.0.1/8", "::1/128"} { + _, ipnet, err := net.ParseCIDR(mask) + if err != nil { + panic(err) + } + localhostIPNets = append(localhostIPNets, ipnet) + } +} + +func IsLocalhostIp(ipStr string) bool { + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, ipnet := range localhostIPNets { + if ipnet.Contains(ip) { + return true + } + } + return false +} + +// Given a host string, return true if the host is an ip (v4/v6) localhost. +func IsLocalhost(host string) bool { + return IsLocalhostIp(host) || + host == "localhost" || + host == "ip6-localhost" || + host == "ipv6-localhost" +} + +// Resolves hostnames in addresses to actual IP4 addresses. Skips all invalid addresses +// and all addresses that can't be resolved. +// `addrs` are assumed to be of form: ["<hostname>:<port>", ...] +// Returns an error in addition to resolved addresses if not all resolutions succeed. 
+func ResolveIP4s(addrs []string) ([]string, error) { + resolvedAddrs := make([]string, 0, len(addrs)) + var lastErr error + + for _, server := range addrs { + hostPort := strings.Split(server, ":") + if len(hostPort) != 2 { + lastErr = fmt.Errorf("Skipping invalid address: %s", server) + continue + } + + ip, err := net.ResolveIPAddr("ip4", hostPort[0]) + if err != nil { + lastErr = err + continue + } + resolvedAddrs = append(resolvedAddrs, ip.IP.String()+":"+hostPort[1]) + } + return resolvedAddrs, lastErr +} + +func LookupValidAddrs() (map[string]bool, error) { + hostName, err := os.Hostname() + if err != nil { + return nil, err + } + addrs, err := net.LookupHost(hostName) + if err != nil { + return nil, err + } + validAddrs := make(map[string]bool) + validAddrs[hostName] = true + for _, addr := range addrs { + validAddrs[addr] = true + } + // Special case localhost/127.0.0.1 so that this works on devVMs. It should + // have no affect in production. + validAddrs["127.0.0.1"] = true + validAddrs["localhost"] = true + return validAddrs, nil +} diff --git a/weed/wdclient/net2/managed_connection.go b/weed/wdclient/net2/managed_connection.go new file mode 100644 index 000000000..a886210d1 --- /dev/null +++ b/weed/wdclient/net2/managed_connection.go @@ -0,0 +1,185 @@ +package net2 + +import ( + "fmt" + "net" + "time" + + "errors" + "github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool" +) + +// Dial's arguments. +type NetworkAddress struct { + Network string + Address string +} + +// A connection managed by a connection pool. NOTE: SetDeadline, +// SetReadDeadline and SetWriteDeadline are disabled for managed connections. +// (The deadlines are set by the connection pool). +type ManagedConn interface { + net.Conn + + // This returns the original (network, address) entry used for creating + // the connection. + Key() NetworkAddress + + // This returns the underlying net.Conn implementation. + RawConn() net.Conn + + // This returns the connection pool which owns this connection. + Owner() ConnectionPool + + // This indictes a user is done with the connection and releases the + // connection back to the connection pool. + ReleaseConnection() error + + // This indicates the connection is an invalid state, and that the + // connection should be discarded from the connection pool. + DiscardConnection() error +} + +// A physical implementation of ManagedConn +type managedConnImpl struct { + addr NetworkAddress + handle resource_pool.ManagedHandle + pool ConnectionPool + options ConnectionOptions +} + +// This creates a managed connection wrapper. +func NewManagedConn( + network string, + address string, + handle resource_pool.ManagedHandle, + pool ConnectionPool, + options ConnectionOptions) ManagedConn { + + addr := NetworkAddress{ + Network: network, + Address: address, + } + + return &managedConnImpl{ + addr: addr, + handle: handle, + pool: pool, + options: options, + } +} + +func (c *managedConnImpl) rawConn() (net.Conn, error) { + h, err := c.handle.Handle() + return h.(net.Conn), err +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) RawConn() net.Conn { + h, _ := c.handle.Handle() + return h.(net.Conn) +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) Key() NetworkAddress { + return c.addr +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) Owner() ConnectionPool { + return c.pool +} + +// See ManagedConn for documentation. 
+func (c *managedConnImpl) ReleaseConnection() error { + return c.handle.Release() +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) DiscardConnection() error { + return c.handle.Discard() +} + +// See net.Conn for documentation +func (c *managedConnImpl) Read(b []byte) (n int, err error) { + conn, err := c.rawConn() + if err != nil { + return 0, err + } + + if c.options.ReadTimeout > 0 { + deadline := c.options.getCurrentTime().Add(c.options.ReadTimeout) + _ = conn.SetReadDeadline(deadline) + } + n, err = conn.Read(b) + if err != nil { + var localAddr string + if conn.LocalAddr() != nil { + localAddr = conn.LocalAddr().String() + } else { + localAddr = "(nil)" + } + + var remoteAddr string + if conn.RemoteAddr() != nil { + remoteAddr = conn.RemoteAddr().String() + } else { + remoteAddr = "(nil)" + } + err = fmt.Errorf("Read error from host: %s <-> %s: %v", localAddr, remoteAddr, err) + } + return +} + +// See net.Conn for documentation +func (c *managedConnImpl) Write(b []byte) (n int, err error) { + conn, err := c.rawConn() + if err != nil { + return 0, err + } + + if c.options.WriteTimeout > 0 { + deadline := c.options.getCurrentTime().Add(c.options.WriteTimeout) + _ = conn.SetWriteDeadline(deadline) + } + n, err = conn.Write(b) + if err != nil { + err = fmt.Errorf("Write error: %v", err) + } + return +} + +// See net.Conn for documentation +func (c *managedConnImpl) Close() error { + return c.handle.Discard() +} + +// See net.Conn for documentation +func (c *managedConnImpl) LocalAddr() net.Addr { + conn, _ := c.rawConn() + return conn.LocalAddr() +} + +// See net.Conn for documentation +func (c *managedConnImpl) RemoteAddr() net.Addr { + conn, _ := c.rawConn() + return conn.RemoteAddr() +} + +// SetDeadline is disabled for managed connection (The deadline is set by +// us, with respect to the read/write timeouts specified in ConnectionOptions). +func (c *managedConnImpl) SetDeadline(t time.Time) error { + return errors.New("Cannot set deadline for managed connection") +} + +// SetReadDeadline is disabled for managed connection (The deadline is set by +// us with respect to the read timeout specified in ConnectionOptions). +func (c *managedConnImpl) SetReadDeadline(t time.Time) error { + return errors.New("Cannot set read deadline for managed connection") +} + +// SetWriteDeadline is disabled for managed connection (The deadline is set by +// us with respect to the write timeout specified in ConnectionOptions). +func (c *managedConnImpl) SetWriteDeadline(t time.Time) error { + return errors.New("Cannot set write deadline for managed connection") +} diff --git a/weed/wdclient/net2/port.go b/weed/wdclient/net2/port.go new file mode 100644 index 000000000..f83adba28 --- /dev/null +++ b/weed/wdclient/net2/port.go @@ -0,0 +1,19 @@ +package net2 + +import ( + "net" + "strconv" +) + +// Returns the port information. +func GetPort(addr net.Addr) (int, error) { + _, lport, err := net.SplitHostPort(addr.String()) + if err != nil { + return -1, err + } + lportInt, err := strconv.Atoi(lport) + if err != nil { + return -1, err + } + return lportInt, nil +} diff --git a/weed/wdclient/resource_pool/doc.go b/weed/wdclient/resource_pool/doc.go new file mode 100644 index 000000000..b8b3f92fa --- /dev/null +++ b/weed/wdclient/resource_pool/doc.go @@ -0,0 +1,5 @@ +// A generic resource pool for managing resources such as network connections. 
+package resource_pool + +// copied from https://github.com/dropbox/godropbox/tree/master/resource_pool +// removed other dependencies diff --git a/weed/wdclient/resource_pool/managed_handle.go b/weed/wdclient/resource_pool/managed_handle.go new file mode 100644 index 000000000..e1d82ca7b --- /dev/null +++ b/weed/wdclient/resource_pool/managed_handle.go @@ -0,0 +1,97 @@ +package resource_pool + +import ( + "sync/atomic" + + "errors" +) + +// A resource handle managed by a resource pool. +type ManagedHandle interface { + // This returns the handle's resource location. + ResourceLocation() string + + // This returns the underlying resource handle (or error if the handle + // is no longer active). + Handle() (interface{}, error) + + // This returns the resource pool which owns this handle. + Owner() ResourcePool + + // The releases the underlying resource handle to the caller and marks the + // managed handle as inactive. The caller is responsible for cleaning up + // the released handle. This returns nil if the managed handle no longer + // owns the resource. + ReleaseUnderlyingHandle() interface{} + + // This indictes a user is done with the handle and releases the handle + // back to the resource pool. + Release() error + + // This indicates the handle is an invalid state, and that the + // connection should be discarded from the connection pool. + Discard() error +} + +// A physical implementation of ManagedHandle +type managedHandleImpl struct { + location string + handle interface{} + pool ResourcePool + isActive int32 // atomic bool + options Options +} + +// This creates a managed handle wrapper. +func NewManagedHandle( + resourceLocation string, + handle interface{}, + pool ResourcePool, + options Options) ManagedHandle { + + h := &managedHandleImpl{ + location: resourceLocation, + handle: handle, + pool: pool, + options: options, + } + atomic.StoreInt32(&h.isActive, 1) + + return h +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) ResourceLocation() string { + return c.location +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Handle() (interface{}, error) { + if atomic.LoadInt32(&c.isActive) == 0 { + return c.handle, errors.New("Resource handle is no longer valid") + } + return c.handle, nil +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Owner() ResourcePool { + return c.pool +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) ReleaseUnderlyingHandle() interface{} { + if atomic.CompareAndSwapInt32(&c.isActive, 1, 0) { + return c.handle + } + return nil +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Release() error { + return c.pool.Release(c) +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Discard() error { + return c.pool.Discard(c) +} diff --git a/weed/wdclient/resource_pool/multi_resource_pool.go b/weed/wdclient/resource_pool/multi_resource_pool.go new file mode 100644 index 000000000..9ac25526d --- /dev/null +++ b/weed/wdclient/resource_pool/multi_resource_pool.go @@ -0,0 +1,200 @@ +package resource_pool + +import ( + "fmt" + "sync" + + "errors" +) + +// A resource pool implementation that manages multiple resource location +// entries. The handles to each resource location entry acts independently. +// For example "tcp localhost:11211" could act as memcache +// shard 0 and "tcp localhost:11212" could act as memcache shard 1. 
diff --git a/weed/wdclient/resource_pool/multi_resource_pool.go b/weed/wdclient/resource_pool/multi_resource_pool.go
new file mode 100644
index 000000000..9ac25526d
--- /dev/null
+++ b/weed/wdclient/resource_pool/multi_resource_pool.go
@@ -0,0 +1,200 @@
+package resource_pool
+
+import (
+	"fmt"
+	"sync"
+
+	"errors"
+)
+
+// A resource pool implementation that manages multiple resource location
+// entries. The handles to each resource location entry act independently.
+// For example "tcp localhost:11211" could act as memcache
+// shard 0 and "tcp localhost:11212" could act as memcache shard 1.
+type multiResourcePool struct {
+	options Options
+
+	createPool func(Options) ResourcePool
+
+	rwMutex    sync.RWMutex
+	isLameDuck bool // guarded by rwMutex
+	// NOTE: the locationPools is guarded by rwMutex, but the pool entries
+	// are not.
+	locationPools map[string]ResourcePool
+}
+
+// This returns a MultiResourcePool, which manages multiple
+// resource location entries. The handles to each resource location
+// entry act independently.
+//
+// When createPool is nil, NewSimpleResourcePool is used as the default.
+func NewMultiResourcePool(
+	options Options,
+	createPool func(Options) ResourcePool) ResourcePool {
+
+	if createPool == nil {
+		createPool = NewSimpleResourcePool
+	}
+
+	return &multiResourcePool{
+		options:       options,
+		createPool:    createPool,
+		rwMutex:       sync.RWMutex{},
+		isLameDuck:    false,
+		locationPools: make(map[string]ResourcePool),
+	}
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) NumActive() int32 {
+	total := int32(0)
+
+	p.rwMutex.RLock()
+	defer p.rwMutex.RUnlock()
+
+	for _, pool := range p.locationPools {
+		total += pool.NumActive()
+	}
+	return total
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) ActiveHighWaterMark() int32 {
+	high := int32(0)
+
+	p.rwMutex.RLock()
+	defer p.rwMutex.RUnlock()
+
+	for _, pool := range p.locationPools {
+		val := pool.ActiveHighWaterMark()
+		if val > high {
+			high = val
+		}
+	}
+	return high
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) NumIdle() int {
+	total := 0
+
+	p.rwMutex.RLock()
+	defer p.rwMutex.RUnlock()
+
+	for _, pool := range p.locationPools {
+		total += pool.NumIdle()
+	}
+	return total
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) Register(resourceLocation string) error {
+	if resourceLocation == "" {
+		return errors.New("Registering invalid resource location")
+	}
+
+	p.rwMutex.Lock()
+	defer p.rwMutex.Unlock()
+
+	if p.isLameDuck {
+		return fmt.Errorf(
+			"Cannot register %s to lame duck resource pool",
+			resourceLocation)
+	}
+
+	if _, inMap := p.locationPools[resourceLocation]; inMap {
+		return nil
+	}
+
+	pool := p.createPool(p.options)
+	if err := pool.Register(resourceLocation); err != nil {
+		return err
+	}
+
+	p.locationPools[resourceLocation] = pool
+	return nil
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) Unregister(resourceLocation string) error {
+	p.rwMutex.Lock()
+	defer p.rwMutex.Unlock()
+
+	if pool, inMap := p.locationPools[resourceLocation]; inMap {
+		_ = pool.Unregister("")
+		pool.EnterLameDuckMode()
+		delete(p.locationPools, resourceLocation)
+	}
+	return nil
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) ListRegistered() []string {
+	p.rwMutex.RLock()
+	defer p.rwMutex.RUnlock()
+
+	result := make([]string, 0, len(p.locationPools))
+	for key := range p.locationPools {
+		result = append(result, key)
+	}
+
+	return result
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) Get(
+	resourceLocation string) (ManagedHandle, error) {
+
+	pool := p.getPool(resourceLocation)
+	if pool == nil {
+		return nil, fmt.Errorf(
+			"%s is not registered in the resource pool",
+			resourceLocation)
+	}
+	return pool.Get(resourceLocation)
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) Release(handle ManagedHandle) error {
+	pool := p.getPool(handle.ResourceLocation())
+	if pool == nil {
+		return errors.New(
+			"Resource pool cannot take control of a handle owned " +
+				"by another resource pool")
+	}
+
+	return pool.Release(handle)
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) Discard(handle ManagedHandle) error {
+	pool := p.getPool(handle.ResourceLocation())
+	if pool == nil {
+		return errors.New(
+			"Resource pool cannot take control of a handle owned " +
+				"by another resource pool")
+	}
+
+	return pool.Discard(handle)
+}
+
+// See ResourcePool for documentation.
+func (p *multiResourcePool) EnterLameDuckMode() {
+	p.rwMutex.Lock()
+	defer p.rwMutex.Unlock()
+
+	p.isLameDuck = true
+
+	for _, pool := range p.locationPools {
+		pool.EnterLameDuckMode()
+	}
+}
+
+func (p *multiResourcePool) getPool(resourceLocation string) ResourcePool {
+	p.rwMutex.RLock()
+	defer p.rwMutex.RUnlock()
+
+	if pool, inMap := p.locationPools[resourceLocation]; inMap {
+		return pool
+	}
+	return nil
+}
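[Editor's note: the per-location routing above is easiest to see end to end. An illustrative sketch, not part of the commit; the shard addresses echo the memcache example in the header comment, and the Open/Close callbacks are the same TCP ones as in the earlier sketch.]

// Illustrative sketch only, not part of this commit.
package example

import (
	"net"

	"github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool"
)

func ShardedPools() {
	opts := resource_pool.Options{
		Open:  func(loc string) (interface{}, error) { return net.Dial("tcp", loc) },
		Close: func(h interface{}) error { return h.(net.Conn).Close() },
	}
	multi := resource_pool.NewMultiResourcePool(opts, nil) // nil: one SimpleResourcePool per location
	_ = multi.Register("localhost:11211")                  // "shard 0", as in the comment above
	_ = multi.Register("localhost:11212")                  // "shard 1"

	if h, err := multi.Get("localhost:11212"); err == nil { // served by shard 1's sub-pool only
		_ = h.Release()
	}

	_ = multi.Unregister("localhost:11211") // shard 0's sub-pool enters lame duck mode
}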
diff --git a/weed/wdclient/resource_pool/resource_pool.go b/weed/wdclient/resource_pool/resource_pool.go
new file mode 100644
index 000000000..26c433f50
--- /dev/null
+++ b/weed/wdclient/resource_pool/resource_pool.go
@@ -0,0 +1,96 @@
+package resource_pool
+
+import (
+	"time"
+)
+
+type Options struct {
+	// The maximum number of active resource handles per resource location. (A
+	// non-positive value indicates the number of active resource handles is
+	// unbounded.)
+	MaxActiveHandles int32
+
+	// The maximum number of idle resource handles per resource location that
+	// are kept alive by the resource pool.
+	MaxIdleHandles uint32
+
+	// The maximum amount of time an idle resource handle can remain alive (if
+	// specified).
+	MaxIdleTime *time.Duration
+
+	// This limits the number of concurrent Open calls (there's no limit when
+	// OpenMaxConcurrency is non-positive).
+	OpenMaxConcurrency int
+
+	// This function creates a resource handle (e.g., a connection) for a
+	// resource location. The function must be thread-safe.
+	Open func(resourceLocation string) (
+		handle interface{},
+		err error)
+
+	// This function destroys a resource handle and performs the necessary
+	// cleanup to free up resources. The function must be thread-safe.
+	Close func(handle interface{}) error
+
+	// This specifies the now time function. When the function is non-nil, the
+	// resource pool will use the specified function instead of time.Now to
+	// generate the current time.
+	NowFunc func() time.Time
+}
+
+func (o Options) getCurrentTime() time.Time {
+	if o.NowFunc == nil {
+		return time.Now()
+	} else {
+		return o.NowFunc()
+	}
+}
+
+// A generic interface for a managed resource pool. All resource pool
+// implementations must be thread-safe.
+type ResourcePool interface {
+	// This returns the number of active resource handles.
+	NumActive() int32
+
+	// This returns the highest number of active handles for the entire
+	// lifetime of the pool. If the pool contains multiple sub-pools, the
+	// high water mark is the max of the sub-pools' high water marks.
+	ActiveHighWaterMark() int32
+
+	// This returns the number of alive idle handles. NOTE: This is only used
+	// for testing.
+	NumIdle() int
+
+	// This associates a resource location to the resource pool; after which,
+	// the user can get resource handles for the resource location.
+	Register(resourceLocation string) error
+
+	// This dissociates a resource location from the resource pool; after which,
+	// the user can no longer get resource handles for the resource location.
+	// If the given resource location corresponds to a sub-pool, the unregistered
+	// sub-pool will enter lame duck mode.
+	Unregister(resourceLocation string) error
+
+	// This returns the list of registered resource location entries.
+	ListRegistered() []string
+
+	// This gets an active resource handle from the resource pool. The
+	// handle will remain active until one of the following is called:
+	//  1. handle.Release()
+	//  2. handle.Discard()
+	//  3. pool.Release(handle)
+	//  4. pool.Discard(handle)
+	Get(key string) (ManagedHandle, error)
+
+	// This releases an active resource handle back to the resource pool.
+	Release(handle ManagedHandle) error
+
+	// This discards an active resource from the resource pool.
+	Discard(handle ManagedHandle) error
+
+	// Enter the resource pool into lame duck mode. The resource pool
+	// will no longer return resource handles, and all idle resource handles
+	// are closed immediately (including active resource handles that are
+	// released back to the pool afterward).
+	EnterLameDuckMode()
+}
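[Editor's note: NowFunc exists so callers can inject a fake clock, which makes the MaxIdleTime expiry behavior deterministic in tests. A test-style sketch, not part of the commit; the location string and the trivial Open/Close callbacks are placeholders.]

// Test-style sketch only, not part of this commit.
package example

import (
	"time"

	"github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool"
)

func IdleExpirySketch() {
	now := time.Date(2021, 4, 26, 0, 0, 0, 0, time.UTC)
	idle := 10 * time.Second
	pool := resource_pool.NewSimpleResourcePool(resource_pool.Options{
		MaxIdleHandles: 4,
		MaxIdleTime:    &idle,
		NowFunc:        func() time.Time { return now }, // reads `now` at call time
		Open:           func(loc string) (interface{}, error) { return loc, nil },
		Close:          func(interface{}) error { return nil },
	})
	_ = pool.Register("somewhere")

	h, _ := pool.Get("somewhere")
	_ = h.Release() // queued as idle with keepUntil = now + 10s

	now = now.Add(11 * time.Second) // step the fake clock past keepUntil
	h2, _ := pool.Get("somewhere")  // the expired idle handle is closed; Open runs again
	_ = h2.Release()
}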
diff --git a/weed/wdclient/resource_pool/semaphore.go b/weed/wdclient/resource_pool/semaphore.go
new file mode 100644
index 000000000..ff35d5bc5
--- /dev/null
+++ b/weed/wdclient/resource_pool/semaphore.go
@@ -0,0 +1,154 @@
+package resource_pool
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type Semaphore interface {
+	// Increment the semaphore counter by one.
+	Release()
+
+	// Decrement the semaphore counter by one, and block if counter < 0
+	Acquire()
+
+	// Decrement the semaphore counter by one, and block if counter < 0.
+	// Wait for up to the given duration. Returns true if it did not time out.
+	TryAcquire(timeout time.Duration) bool
+}
+
+// A simple counting Semaphore.
+type boundedSemaphore struct {
+	slots chan struct{}
+}
+
+// Create a bounded semaphore. The count parameter must be a positive number.
+// NOTE: The bounded semaphore will panic if the user tries to Release
+// beyond the specified count.
+func NewBoundedSemaphore(count uint) Semaphore {
+	sem := &boundedSemaphore{
+		slots: make(chan struct{}, int(count)),
+	}
+	for i := 0; i < cap(sem.slots); i++ {
+		sem.slots <- struct{}{}
+	}
+	return sem
+}
+
+// Acquire returns on successful acquisition.
+func (sem *boundedSemaphore) Acquire() {
+	<-sem.slots
+}
+
+// TryAcquire returns true if it acquires a resource slot within the
+// timeout, false otherwise.
+func (sem *boundedSemaphore) TryAcquire(timeout time.Duration) bool {
+	if timeout > 0 {
+		// Wait until we get a slot or the timeout expires.
+		tm := time.NewTimer(timeout)
+		defer tm.Stop()
+		select {
+		case <-sem.slots:
+			return true
+		case <-tm.C:
+			// Timeout expired. In very rare cases this might happen even if
+			// there is a slot available, e.g. a GC pause after we create the timer
+			// and select randomly picked this one out of the two available channels.
+			// We should do one final immediate check below.
+		}
+	}
+
+	// Return true if we have a slot available immediately and false otherwise.
+	select {
+	case <-sem.slots:
+		return true
+	default:
+		return false
+	}
+}
+
+// Release the acquired semaphore. You must not release more than you
+// have acquired.
+func (sem *boundedSemaphore) Release() {
+	select {
+	case sem.slots <- struct{}{}:
+	default:
+		// slots is buffered. If a send blocks, it indicates a programming
+		// error.
+		panic(fmt.Errorf("too many releases for boundedSemaphore"))
+	}
+}
+
+// This returns an unbounded counting semaphore with the specified initial count.
+// The semaphore counter can be arbitrarily large (i.e., Release can be called
+// an unlimited number of times).
+//
+// NOTE: In general, users should prefer the bounded semaphore since it is more
+// efficient than the unbounded semaphore.
+func NewUnboundedSemaphore(initialCount int) Semaphore {
+	res := &unboundedSemaphore{
+		counter: int64(initialCount),
+	}
+	res.cond.L = &res.lock
+	return res
+}
+
+type unboundedSemaphore struct {
+	lock    sync.Mutex
+	cond    sync.Cond
+	counter int64
+}
+
+func (s *unboundedSemaphore) Release() {
+	s.lock.Lock()
+	s.counter += 1
+	if s.counter > 0 {
+		// Not broadcasting here since it's unlikely we can satisfy all waiting
+		// goroutines. Instead, we will Signal again if there is leftover
+		// quota after Acquire, in case of lost wakeups.
+		s.cond.Signal()
+	}
+	s.lock.Unlock()
+}
+
+func (s *unboundedSemaphore) Acquire() {
+	s.lock.Lock()
+	for s.counter < 1 {
+		s.cond.Wait()
+	}
+	s.counter -= 1
+	if s.counter > 0 {
+		s.cond.Signal()
+	}
+	s.lock.Unlock()
+}
+
+func (s *unboundedSemaphore) TryAcquire(timeout time.Duration) bool {
+	done := make(chan bool, 1)
+	// Gate used to communicate between the threads and decide what the result
+	// is. If the main thread decides, we have timed out; otherwise we succeed.
+	decided := new(int32)
+	atomic.StoreInt32(decided, 0)
+	go func() {
+		s.Acquire()
+		if atomic.SwapInt32(decided, 1) == 0 {
+			// Acquire won the race.
+			done <- true
+		} else {
+			// We already decided the result, and this thread did not win.
+			s.Release()
+		}
+	}()
+	select {
+	case <-done:
+		return true
+	case <-time.After(timeout):
+		if atomic.SwapInt32(decided, 1) == 1 {
+			// The other thread already decided the result.
+			return true
+		}
+		return false
+	}
+}
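[Editor's note: a minimal sketch of the bounded semaphore API above, not part of the commit. It shows the blocking Acquire, the time-bounded TryAcquire, and the panic-on-over-Release contract noted in the constructor's comment.]

// Illustrative sketch only, not part of this commit.
package example

import (
	"time"

	"github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool"
)

func ThrottledWork() {
	sem := resource_pool.NewBoundedSemaphore(3) // three slots, all initially free

	sem.Acquire() // take a slot, blocking until one is free

	if sem.TryAcquire(100 * time.Millisecond) { // bounded wait for a second slot
		sem.Release() // give the second slot back
	}

	sem.Release() // give the first slot back; releasing more than acquired panics
}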
diff --git a/weed/wdclient/resource_pool/simple_resource_pool.go b/weed/wdclient/resource_pool/simple_resource_pool.go
new file mode 100644
index 000000000..b0c539100
--- /dev/null
+++ b/weed/wdclient/resource_pool/simple_resource_pool.go
@@ -0,0 +1,343 @@
+package resource_pool
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type idleHandle struct {
+	handle    interface{}
+	keepUntil *time.Time
+}
+
+type TooManyHandles struct {
+	location string
+}
+
+func (t TooManyHandles) Error() string {
+	return fmt.Sprintf("Too many handles to %s", t.location)
+}
+
+type OpenHandleError struct {
+	location string
+	err      error
+}
+
+func (o OpenHandleError) Error() string {
+	return fmt.Sprintf("Failed to open resource handle: %s (%v)", o.location, o.err)
+}
+
+// A resource pool implementation where all handles are associated to the
+// same resource location.
+type simpleResourcePool struct {
+	options Options
+
+	numActive *int32 // atomic counter
+
+	activeHighWaterMark *int32 // atomic / monotonically increasing value
+
+	openTokens Semaphore
+
+	mutex       sync.Mutex
+	location    string        // guarded by mutex
+	idleHandles []*idleHandle // guarded by mutex
+	isLameDuck  bool          // guarded by mutex
+}
+
+// This returns a SimpleResourcePool, where all handles are associated to a
+// single resource location.
+func NewSimpleResourcePool(options Options) ResourcePool {
+	numActive := new(int32)
+	atomic.StoreInt32(numActive, 0)
+
+	activeHighWaterMark := new(int32)
+	atomic.StoreInt32(activeHighWaterMark, 0)
+
+	var tokens Semaphore
+	if options.OpenMaxConcurrency > 0 {
+		tokens = NewBoundedSemaphore(uint(options.OpenMaxConcurrency))
+	}
+
+	return &simpleResourcePool{
+		location:            "",
+		options:             options,
+		numActive:           numActive,
+		activeHighWaterMark: activeHighWaterMark,
+		openTokens:          tokens,
+		mutex:               sync.Mutex{},
+		idleHandles:         make([]*idleHandle, 0),
+		isLameDuck:          false,
+	}
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) NumActive() int32 {
+	return atomic.LoadInt32(p.numActive)
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) ActiveHighWaterMark() int32 {
+	return atomic.LoadInt32(p.activeHighWaterMark)
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) NumIdle() int {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	return len(p.idleHandles)
+}
+
+// SimpleResourcePool can only register a single (network, address) entry.
+// Register should be called before any Get calls.
+func (p *simpleResourcePool) Register(resourceLocation string) error {
+	if resourceLocation == "" {
+		return errors.New("Invalid resource location")
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.isLameDuck {
+		return fmt.Errorf(
+			"cannot register %s to lame duck resource pool",
+			resourceLocation)
+	}
+
+	if p.location == "" {
+		p.location = resourceLocation
+		return nil
+	}
+	return errors.New("SimpleResourcePool can only register one location")
+}
+
+// SimpleResourcePool will enter lame duck mode upon calling Unregister.
+func (p *simpleResourcePool) Unregister(resourceLocation string) error {
+	p.EnterLameDuckMode()
+	return nil
+}
+
+func (p *simpleResourcePool) ListRegistered() []string {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.location != "" {
+		return []string{p.location}
+	}
+	return []string{}
+}
+
+func (p *simpleResourcePool) getLocation() (string, error) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.location == "" {
+		return "", fmt.Errorf(
+			"resource location is not set for SimpleResourcePool")
+	}
+
+	if p.isLameDuck {
+		return "", fmt.Errorf(
+			"lame duck resource pool cannot return handles to %s",
+			p.location)
+	}
+
+	return p.location, nil
+}
+
+// This gets an active resource from the resource pool. Note that the
+// resourceLocation argument is ignored (the handles are associated to the
+// resource location provided by the first Register call).
+func (p *simpleResourcePool) Get(unused string) (ManagedHandle, error) {
+	activeCount := atomic.AddInt32(p.numActive, 1)
+	if p.options.MaxActiveHandles > 0 &&
+		activeCount > p.options.MaxActiveHandles {
+
+		atomic.AddInt32(p.numActive, -1)
+		return nil, TooManyHandles{p.location}
+	}
+
+	highest := atomic.LoadInt32(p.activeHighWaterMark)
+	for activeCount > highest &&
+		!atomic.CompareAndSwapInt32(
+			p.activeHighWaterMark,
+			highest,
+			activeCount) {
+
+		highest = atomic.LoadInt32(p.activeHighWaterMark)
+	}
+
+	if h := p.getIdleHandle(); h != nil {
+		return h, nil
+	}
+
+	location, err := p.getLocation()
+	if err != nil {
+		atomic.AddInt32(p.numActive, -1)
+		return nil, err
+	}
+
+	if p.openTokens != nil {
+		// The current implementation does not wait for tokens to become available.
+		// If that causes availability hits, we could increase the wait,
+		// similar to simple_pool.go.
+		if p.openTokens.TryAcquire(0) {
+			defer p.openTokens.Release()
+		} else {
+			// We could not immediately acquire a token.
+			// Instead of waiting, return an error immediately.
+			atomic.AddInt32(p.numActive, -1)
+			return nil, OpenHandleError{
+				p.location, errors.New("Open Error: reached OpenMaxConcurrency")}
+		}
+	}
+
+	handle, err := p.options.Open(location)
+	if err != nil {
+		atomic.AddInt32(p.numActive, -1)
+		return nil, OpenHandleError{p.location, err}
+	}
+
+	return NewManagedHandle(p.location, handle, p, p.options), nil
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) Release(handle ManagedHandle) error {
+	if pool, ok := handle.Owner().(*simpleResourcePool); !ok || pool != p {
+		return errors.New(
+			"Resource pool cannot take control of a handle owned " +
+				"by another resource pool")
+	}
+
+	h := handle.ReleaseUnderlyingHandle()
+	if h != nil {
+		// We can unref either before or after queuing the idle handle.
+		// The advantage of unref-ing before queuing is that there is
+		// a higher chance of a successful Get when the number of active handles
+		// is close to the limit (but potentially more handle creation).
+		// The advantage of queuing before unref-ing is that there's a
+		// higher chance of reusing the handle (but potentially more Get failures).
+		atomic.AddInt32(p.numActive, -1)
+		p.queueIdleHandles(h)
+	}
+
+	return nil
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) Discard(handle ManagedHandle) error {
+	if pool, ok := handle.Owner().(*simpleResourcePool); !ok || pool != p {
+		return errors.New(
+			"Resource pool cannot take control of a handle owned " +
+				"by another resource pool")
+	}
+
+	h := handle.ReleaseUnderlyingHandle()
+	if h != nil {
+		atomic.AddInt32(p.numActive, -1)
+		if err := p.options.Close(h); err != nil {
+			return fmt.Errorf("failed to close resource handle: %v", err)
+		}
+	}
+	return nil
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) EnterLameDuckMode() {
+	p.mutex.Lock()
+
+	toClose := p.idleHandles
+	p.isLameDuck = true
+	p.idleHandles = []*idleHandle{}
+
+	p.mutex.Unlock()
+
+	p.closeHandles(toClose)
+}
+
+// This returns an idle resource, if there is one.
+func (p *simpleResourcePool) getIdleHandle() ManagedHandle {
+	var toClose []*idleHandle
+	defer func() {
+		// NOTE: Must keep the closure around to late bind the toClose slice.
+		p.closeHandles(toClose)
+	}()
+
+	now := p.options.getCurrentTime()
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	var i int
+	for i = 0; i < len(p.idleHandles); i++ {
+		idle := p.idleHandles[i]
+		if idle.keepUntil == nil || now.Before(*idle.keepUntil) {
+			break
+		}
+	}
+	if i > 0 {
+		toClose = p.idleHandles[0:i]
+	}
+
+	if i < len(p.idleHandles) {
+		idle := p.idleHandles[i]
+		p.idleHandles = p.idleHandles[i+1:]
+		return NewManagedHandle(p.location, idle.handle, p, p.options)
+	}
+
+	if len(p.idleHandles) > 0 {
+		p.idleHandles = []*idleHandle{}
+	}
+	return nil
+}
+
+// This adds an idle resource to the pool.
+func (p *simpleResourcePool) queueIdleHandles(handle interface{}) {
+	var toClose []*idleHandle
+	defer func() {
+		// NOTE: Must keep the closure around to late bind the toClose slice.
+		p.closeHandles(toClose)
+	}()
+
+	now := p.options.getCurrentTime()
+	var keepUntil *time.Time
+	if p.options.MaxIdleTime != nil {
+		// NOTE: Assign to a temp variable first to work around a compiler bug.
+		x := now.Add(*p.options.MaxIdleTime)
+		keepUntil = &x
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.isLameDuck {
+		toClose = []*idleHandle{
+			{handle: handle},
+		}
+		return
+	}
+
+	p.idleHandles = append(
+		p.idleHandles,
+		&idleHandle{
+			handle:    handle,
+			keepUntil: keepUntil,
+		})
+
+	nIdleHandles := uint32(len(p.idleHandles))
+	if nIdleHandles > p.options.MaxIdleHandles {
+		handlesToClose := nIdleHandles - p.options.MaxIdleHandles
+		toClose = p.idleHandles[0:handlesToClose]
+		p.idleHandles = p.idleHandles[handlesToClose:nIdleHandles]
+	}
+}
+
+// Closes resources; at this point it is assumed that these resources
+// are no longer referenced from the main idleHandles slice.
+func (p *simpleResourcePool) closeHandles(handles []*idleHandle) {
+	for _, handle := range handles {
+		_ = p.options.Close(handle.handle)
+	}
+}
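[Editor's note: Get above fails fast with TooManyHandles once MaxActiveHandles is hit, rather than blocking. A hedged sketch of how a caller might distinguish that case, not part of the commit; the backoff policy is a placeholder.]

// Illustrative sketch only, not part of this commit.
package example

import (
	"errors"
	"time"

	"github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool"
)

func GetWithBackoff(pool resource_pool.ResourcePool, loc string) (resource_pool.ManagedHandle, error) {
	h, err := pool.Get(loc)
	if err != nil {
		var full resource_pool.TooManyHandles
		if errors.As(err, &full) {
			// MaxActiveHandles was hit; Get fails fast, so back off and retry once.
			time.Sleep(50 * time.Millisecond)
			return pool.Get(loc)
		}
		return nil, err
	}
	return h, nil
}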
diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go
index 97df49cb6..271baa132 100644
--- a/weed/wdclient/vid_map.go
+++ b/weed/wdclient/vid_map.go
@@ -15,21 +15,29 @@ const (
 	maxCursorIndex = 4096
 )
 
+type HasLookupFileIdFunction interface {
+	GetLookupFileIdFunction() LookupFileIdFunctionType
+}
+
+type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)
+
 type Location struct {
-	Url       string `json:"url,omitempty"`
-	PublicUrl string `json:"publicUrl,omitempty"`
+	Url        string `json:"url,omitempty"`
+	PublicUrl  string `json:"publicUrl,omitempty"`
+	DataCenter string `json:"dataCenter,omitempty"`
 }
 
 type vidMap struct {
 	sync.RWMutex
 	vid2Locations map[uint32][]Location
-
-	cursor int32
+	DataCenter    string
+	cursor        int32
 }
 
-func newVidMap() vidMap {
+func newVidMap(dataCenter string) vidMap {
 	return vidMap{
 		vid2Locations: make(map[uint32][]Location),
+		DataCenter:    dataCenter,
 		cursor:        -1,
 	}
 }
@@ -44,38 +52,44 @@ func (vc *vidMap) getLocationIndex(length int) (int, error) {
 	return int(atomic.AddInt32(&vc.cursor, 1)) % length, nil
 }
 
-func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrl string, err error) {
+func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err error) {
 	id, err := strconv.Atoi(vid)
 	if err != nil {
 		glog.V(1).Infof("Unknown volume id %s", vid)
-		return "", err
+		return nil, err
 	}
-	return vc.GetRandomLocation(uint32(id))
-}
-
-func (vc *vidMap) LookupFileId(fileId string) (fullUrl string, err error) {
-	parts := strings.Split(fileId, ",")
-	if len(parts) != 2 {
-		return "", errors.New("Invalid fileId " + fileId)
+	locations, found := vc.GetLocations(uint32(id))
+	if !found {
+		return nil, fmt.Errorf("volume %d not found", id)
 	}
-	serverUrl, lookupError := vc.LookupVolumeServerUrl(parts[0])
-	if lookupError != nil {
-		return "", lookupError
+	for _, loc := range locations {
+		if vc.DataCenter == "" || loc.DataCenter == "" || vc.DataCenter != loc.DataCenter {
+			serverUrls = append(serverUrls, loc.Url)
+		} else {
+			serverUrls = append([]string{loc.Url}, serverUrls...)
+		}
 	}
-	return "http://" + serverUrl + "/" + fileId, nil
+	return
 }
 
-func (vc *vidMap) LookupVolumeServer(fileId string) (volumeServer string, err error) {
+func (vc *vidMap) GetLookupFileIdFunction() LookupFileIdFunctionType {
+	return vc.LookupFileId
+}
+
+func (vc *vidMap) LookupFileId(fileId string) (fullUrls []string, err error) {
 	parts := strings.Split(fileId, ",")
 	if len(parts) != 2 {
-		return "", errors.New("Invalid fileId " + fileId)
+		return nil, errors.New("Invalid fileId " + fileId)
 	}
-	serverUrl, lookupError := vc.LookupVolumeServerUrl(parts[0])
+	serverUrls, lookupError := vc.LookupVolumeServerUrl(parts[0])
 	if lookupError != nil {
-		return "", lookupError
+		return nil, lookupError
+	}
+	for _, serverUrl := range serverUrls {
+		fullUrls = append(fullUrls, "http://"+serverUrl+"/"+fileId)
 	}
-	return serverUrl, nil
+	return
 }
 
 func (vc *vidMap) GetVidLocations(vid string) (locations []Location, err error) {
@@ -99,23 +113,6 @@ func (vc *vidMap) GetLocations(vid uint32) (locations []Location, found bool) {
 	return
 }
 
-func (vc *vidMap) GetRandomLocation(vid uint32) (serverUrl string, err error) {
-	vc.RLock()
-	defer vc.RUnlock()
-
-	locations := vc.vid2Locations[vid]
-	if len(locations) == 0 {
-		return "", fmt.Errorf("volume %d not found", vid)
-	}
-
-	index, err := vc.getLocationIndex(len(locations))
-	if err != nil {
-		return "", fmt.Errorf("volume %d: %v", vid, err)
-	}
-
-	return locations[index].Url, nil
-}
-
 func (vc *vidMap) addLocation(vid uint32, location Location) {
 	vc.Lock()
 	defer vc.Unlock()
diff --git a/weed/wdclient/vid_map_test.go b/weed/wdclient/vid_map_test.go
index 87be2fc25..0cea698ac 100644
--- a/weed/wdclient/vid_map_test.go
+++ b/weed/wdclient/vid_map_test.go
@@ -45,7 +45,7 @@ func TestLocationIndex(t *testing.T) {
 	mustOk(7, maxCursorIndex, 0)
 
 	// test with constructor
-	vm = newVidMap()
+	vm = newVidMap("")
 	length := 7
 	for i := 0; i < 100; i++ {
 		got, err := vm.getLocationIndex(length)
diff --git a/weed/wdclient/volume_tcp_client.go b/weed/wdclient/volume_tcp_client.go
new file mode 100644
index 000000000..afebd71eb
--- /dev/null
+++ b/weed/wdclient/volume_tcp_client.go
@@ -0,0 +1,101 @@
+package wdclient
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/chrislusf/seaweedfs/weed/wdclient/net2"
+	"io"
+	"net"
+	"time"
+)
+
+// VolumeTcpClient puts/gets/deletes file chunks directly on volume servers, without replication.
+type VolumeTcpClient struct {
+	cp net2.ConnectionPool
+}
+
+type VolumeTcpConn struct {
+	net.Conn
+	bufWriter *bufio.Writer
+	bufReader *bufio.Reader
+}
+
+func NewVolumeTcpClient() *VolumeTcpClient {
+	MaxIdleTime := 10 * time.Second
+	return &VolumeTcpClient{
+		cp: net2.NewMultiConnectionPool(net2.ConnectionOptions{
+			MaxActiveConnections: 16,
+			MaxIdleConnections:   1,
+			MaxIdleTime:          &MaxIdleTime,
+			DialMaxConcurrency:   0,
+			Dial: func(network string, address string) (net.Conn, error) {
+				conn, err := net.Dial(network, address)
+				return &VolumeTcpConn{
+					conn,
+					bufio.NewWriter(conn),
+					bufio.NewReader(conn),
+				}, err
+			},
+			NowFunc:      nil,
+			ReadTimeout:  0,
+			WriteTimeout: 0,
+		}),
+	}
+}
+func (c *VolumeTcpClient) PutFileChunk(volumeServerAddress string, fileId string, fileSize uint32, fileReader io.Reader) (err error) {
+
+	tcpAddress, parseErr := pb.ParseServerAddress(volumeServerAddress, 20000)
+	if parseErr != nil {
+		return parseErr
+	}
+
+	c.cp.Register("tcp", tcpAddress)
+	tcpConn, getErr := c.cp.Get("tcp", tcpAddress)
+	if getErr != nil {
+		return fmt.Errorf("get connection to %s: %v", tcpAddress, getErr)
+	}
+	conn := tcpConn.RawConn().(*VolumeTcpConn)
+	defer func() {
+		if err != nil {
+			tcpConn.DiscardConnection()
+		} else {
+			tcpConn.ReleaseConnection()
+		}
+	}()
+
+	buf := []byte("+" + fileId + "\n")
+	_, err = conn.bufWriter.Write(buf)
+	if err != nil {
+		return
+	}
+	// Reuse buf's first 4 bytes as scratch space for the size header;
+	// bufio.Writer has already copied the fileId line into its buffer.
+	util.Uint32toBytes(buf[0:4], fileSize)
+	_, err = conn.bufWriter.Write(buf[0:4])
+	if err != nil {
+		return
+	}
+	_, err = io.Copy(conn.bufWriter, fileReader)
+	if err != nil {
+		return
+	}
+	if _, err = conn.bufWriter.Write([]byte("!\n")); err != nil {
+		return
+	}
+	if err = conn.bufWriter.Flush(); err != nil {
+		return
+	}
+
+	ret, _, err := conn.bufReader.ReadLine()
+	if err != nil {
+		glog.V(0).Infof("upload by tcp: %v", err)
+		return
+	}
+	if !bytes.HasPrefix(ret, []byte("+OK")) {
+		glog.V(0).Infof("upload by tcp: %v", string(ret))
+	}
+
+	return nil
+}
diff --git a/weed/weed.go b/weed/weed.go
index ecb0ba2a4..6e69c1480 100644
--- a/weed/weed.go
+++ b/weed/weed.go
@@ -4,8 +4,8 @@
 package main
 
 import (
-	"flag"
 	"fmt"
+	flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
 	"io"
 	"math/rand"
 	"os"
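[Editor's note: as implemented in PutFileChunk above, each upload sends "+<fileId>\n", a 4-byte size header, the payload, and a trailing "!\n", then expects a reply line starting with "+OK". A caller sketch follows, not part of the commit; the address and file id are placeholders, and the claim that the raw TCP port is derived from the HTTP address via the 20000 offset is an assumption read off the ParseServerAddress call site.]

// Illustrative caller sketch only, not part of this commit.
package example

import (
	"bytes"

	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

func UploadChunk() error {
	client := wdclient.NewVolumeTcpClient()
	payload := []byte("hello")
	// "localhost:8080" is a placeholder volume server address; PutFileChunk
	// derives the raw TCP address from it (assumed: port + 20000 offset).
	return client.PutFileChunk(
		"localhost:8080",
		"3,01637037d6", // placeholder fid: volume id, then needle key/cookie
		uint32(len(payload)),
		bytes.NewReader(payload),
	)
}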
