Diffstat (limited to 'weed')
-rw-r--r--  weed/command/backup.go | 6
-rw-r--r--  weed/command/benchmark.go | 7
-rw-r--r--  weed/command/download.go | 11
-rw-r--r--  weed/command/filer.go | 102
-rw-r--r--  weed/command/filer_backup.go | 12
-rw-r--r--  weed/command/filer_cat.go | 2
-rw-r--r--  weed/command/filer_copy.go | 25
-rw-r--r--  weed/command/filer_meta_backup.go | 15
-rw-r--r--  weed/command/filer_meta_tail.go | 2
-rw-r--r--  weed/command/filer_remote_gateway.go | 2
-rw-r--r--  weed/command/filer_remote_gateway_buckets.go | 4
-rw-r--r--  weed/command/filer_remote_sync.go | 2
-rw-r--r--  weed/command/filer_remote_sync_dir.go | 9
-rw-r--r--  weed/command/filer_replication.go | 2
-rw-r--r--  weed/command/filer_sync.go | 15
-rw-r--r--  weed/command/iam.go | 2
-rw-r--r--  weed/command/master.go | 20
-rw-r--r--  weed/command/master_follower.go | 2
-rw-r--r--  weed/command/mount_std.go | 2
-rw-r--r--  weed/command/mq_broker.go | 2
-rw-r--r--  weed/command/s3.go | 23
-rw-r--r--  weed/command/scaffold/filer.toml | 2
-rw-r--r--  weed/command/scaffold/security.toml | 17
-rw-r--r--  weed/command/server.go | 12
-rw-r--r--  weed/command/shell.go | 2
-rw-r--r--  weed/command/update.go | 12
-rw-r--r--  weed/command/upload.go | 2
-rw-r--r--  weed/command/volume.go | 14
-rw-r--r--  weed/command/webdav.go | 2
-rw-r--r--  weed/filer/abstract_sql/abstract_sql_store.go | 6
-rw-r--r--  weed/filer/filechunk_group.go | 5
-rw-r--r--  weed/filer/filechunk_manifest.go | 5
-rw-r--r--  weed/filer/filer.go | 24
-rw-r--r--  weed/filer/filer_conf.go | 20
-rw-r--r--  weed/filer/filer_delete_entry.go | 11
-rw-r--r--  weed/filer/filer_deletion.go | 52
-rw-r--r--  weed/filer/filer_notify.go | 112
-rw-r--r--  weed/filer/filer_notify_append.go | 8
-rw-r--r--  weed/filer/filer_notify_read.go | 352
-rw-r--r--  weed/filer/filerstore_wrapper.go | 4
-rw-r--r--  weed/filer/leveldb2/leveldb2_store.go | 6
-rw-r--r--  weed/filer/leveldb3/leveldb3_store.go | 9
-rw-r--r--  weed/filer/meta_replay.go | 38
-rw-r--r--  weed/filer/reader_cache.go | 4
-rw-r--r--  weed/filer/redis/redis_cluster_store.go | 2
-rw-r--r--  weed/filer/redis/redis_store.go | 2
-rw-r--r--  weed/filer/redis/universal_redis_store.go | 2
-rw-r--r--  weed/filer/redis/universal_redis_store_kv.go | 2
-rw-r--r--  weed/filer/redis2/redis_cluster_store.go | 2
-rw-r--r--  weed/filer/redis2/redis_sentinel_store.go | 2
-rw-r--r--  weed/filer/redis2/redis_store.go | 2
-rw-r--r--  weed/filer/redis2/universal_redis_store.go | 4
-rw-r--r--  weed/filer/redis2/universal_redis_store_kv.go | 2
-rw-r--r--  weed/filer/redis3/ItemList.go | 6
-rw-r--r--  weed/filer/redis3/item_list_serde.go | 2
-rw-r--r--  weed/filer/redis3/kv_directory_children.go | 2
-rw-r--r--  weed/filer/redis3/kv_directory_children_test.go | 8
-rw-r--r--  weed/filer/redis3/redis_cluster_store.go | 4
-rw-r--r--  weed/filer/redis3/redis_sentinel_store.go | 4
-rw-r--r--  weed/filer/redis3/redis_store.go | 4
-rw-r--r--  weed/filer/redis3/skiplist_element_store.go | 2
-rw-r--r--  weed/filer/redis3/universal_redis_store.go | 2
-rw-r--r--  weed/filer/redis3/universal_redis_store_kv.go | 2
-rw-r--r--  weed/filer/redis_lua/redis_cluster_store.go | 2
-rw-r--r--  weed/filer/redis_lua/redis_sentinel_store.go | 2
-rw-r--r--  weed/filer/redis_lua/redis_store.go | 2
-rw-r--r--  weed/filer/redis_lua/stored_procedure/init.go | 2
-rw-r--r--  weed/filer/redis_lua/universal_redis_store.go | 2
-rw-r--r--  weed/filer/redis_lua/universal_redis_store_kv.go | 2
-rw-r--r--  weed/filer/stream.go | 5
-rw-r--r--  weed/iamapi/iamapi_server.go | 2
-rw-r--r--  weed/iamapi/iamapi_test.go | 2
-rw-r--r--  weed/mount/filehandle.go | 14
-rw-r--r--  weed/mount/filehandle_map.go | 8
-rw-r--r--  weed/mount/filehandle_read.go | 14
-rw-r--r--  weed/mount/locked_entry.go | 2
-rw-r--r--  weed/mount/meta_cache/meta_cache.go | 7
-rw-r--r--  weed/mount/meta_cache/meta_cache_subscribe.go | 8
-rw-r--r--  weed/mount/weedfs.go | 5
-rw-r--r--  weed/mount/weedfs_dir_lookup.go | 2
-rw-r--r--  weed/mount/weedfs_dir_read.go | 8
-rw-r--r--  weed/mount/weedfs_file_lseek.go | 4
-rw-r--r--  weed/mount/weedfs_file_sync.go | 7
-rw-r--r--  weed/mount/weedfs_write.go | 6
-rw-r--r--  weed/mq/broker/broker_topic_partition_read_write.go | 3
-rw-r--r--  weed/mq/broker/broker_write.go | 8
-rw-r--r--  weed/mq/client/cmd/weed_pub_kv/publisher_kv.go | 2
-rw-r--r--  weed/mq/client/cmd/weed_pub_record/publisher_record.go | 2
-rw-r--r--  weed/mq/client/cmd/weed_sub_kv/subscriber_kv.go | 2
-rw-r--r--  weed/mq/client/cmd/weed_sub_record/subscriber_record.go | 2
-rw-r--r--  weed/operation/chunked_file.go | 9
-rw-r--r--  weed/operation/delete_content.go | 14
-rw-r--r--  weed/operation/needle_parse_test.go | 16
-rw-r--r--  weed/operation/submit.go | 24
-rw-r--r--  weed/operation/upload_content.go | 86
-rw-r--r--  weed/pb/filer.proto | 15
-rw-r--r--  weed/pb/filer_pb/filer.pb.go | 1492
-rw-r--r--  weed/pb/filer_pb/filer_client_bfs.go | 36
-rw-r--r--  weed/pb/filer_pb/filer_grpc.pb.go | 68
-rw-r--r--  weed/pb/filer_pb_tail.go | 3
-rw-r--r--  weed/remote_storage/traverse_bfs.go | 62
-rw-r--r--  weed/replication/repl_util/replication_util.go | 4
-rw-r--r--  weed/replication/sink/azuresink/azure_sink.go | 13
-rw-r--r--  weed/replication/sink/filersink/fetch_write.go | 11
-rw-r--r--  weed/replication/sink/filersink/filer_sink.go | 4
-rw-r--r--  weed/replication/source/filer_source.go | 5
-rw-r--r--  weed/s3api/AmazonS3.xsd | 1
-rw-r--r--  weed/s3api/README.txt | 8
-rw-r--r--  weed/s3api/auto_signature_v4_test.go | 18
-rw-r--r--  weed/s3api/s3api_acl_helper.go | 4
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go | 34
-rw-r--r--  weed/s3api/s3api_bucket_handlers_test.go | 29
-rw-r--r--  weed/s3api/s3api_object_handlers.go | 48
-rw-r--r--  weed/s3api/s3api_object_handlers_copy.go | 9
-rw-r--r--  weed/s3api/s3api_object_handlers_delete.go | 22
-rw-r--r--  weed/s3api/s3api_object_handlers_list.go | 122
-rw-r--r--  weed/s3api/s3api_object_handlers_list_test.go | 2
-rw-r--r--  weed/s3api/s3api_object_handlers_put.go | 2
-rw-r--r--  weed/s3api/s3api_server.go | 107
-rw-r--r--  weed/s3api/s3api_xsd_generated.go | 1066
-rw-r--r--  weed/s3api/s3api_xsd_generated_helper.go | 10
-rw-r--r--  weed/s3api/s3err/error_handler.go | 1
-rw-r--r--  weed/security/tls.go | 100
-rw-r--r--  weed/sequence/memory_sequencer.go | 4
-rw-r--r--  weed/sequence/sequence.go | 1
-rw-r--r--  weed/sequence/snowflake_sequencer.go | 5
-rw-r--r--  weed/server/common.go | 41
-rw-r--r--  weed/server/filer_grpc_server.go | 9
-rw-r--r--  weed/server/filer_grpc_server_admin.go | 2
-rw-r--r--  weed/server/filer_grpc_server_rename.go | 2
-rw-r--r--  weed/server/filer_grpc_server_traverse_meta.go | 84
-rw-r--r--  weed/server/filer_grpc_server_traverse_meta_test.go | 31
-rw-r--r--  weed/server/filer_server.go | 6
-rw-r--r--  weed/server/filer_server_handlers_proxy.go | 17
-rw-r--r--  weed/server/filer_server_handlers_read.go | 12
-rw-r--r--  weed/server/filer_server_handlers_read_dir.go | 8
-rw-r--r--  weed/server/filer_server_handlers_write.go | 5
-rw-r--r--  weed/server/filer_server_handlers_write_autochunk.go | 21
-rw-r--r--  weed/server/filer_server_handlers_write_cipher.go | 8
-rw-r--r--  weed/server/filer_server_handlers_write_upload.go | 8
-rw-r--r--  weed/server/filer_ui/breadcrumb.go | 3
-rw-r--r--  weed/server/filer_ui/breadcrumb_test.go | 86
-rw-r--r--  weed/server/filer_ui/filer.html | 6
-rw-r--r--  weed/server/master_grpc_server_assign.go | 15
-rw-r--r--  weed/server/master_grpc_server_volume.go | 58
-rw-r--r--  weed/server/master_server.go | 36
-rw-r--r--  weed/server/master_server_handlers.go | 32
-rw-r--r--  weed/server/master_server_handlers_admin.go | 22
-rw-r--r--  weed/server/raft_hashicorp.go | 96
-rw-r--r--  weed/server/raft_server.go | 3
-rw-r--r--  weed/server/volume_grpc_remote.go | 13
-rw-r--r--  weed/server/volume_server_handlers_read.go | 29
-rw-r--r--  weed/server/volume_server_handlers_write.go | 6
-rw-r--r--  weed/server/webdav_server.go | 30
-rw-r--r--  weed/shell/command_ec_balance.go | 8
-rw-r--r--  weed/shell/command_ec_rebuild.go | 2
-rw-r--r--  weed/shell/command_fs_merge_volumes.go | 23
-rw-r--r--  weed/shell/command_fs_verify.go | 203
-rw-r--r--  weed/shell/command_remote_uncache.go | 5
-rw-r--r--  weed/shell/command_s3_bucket_quota_check.go | 2
-rw-r--r--  weed/shell/command_s3_clean_uploads.go | 3
-rw-r--r--  weed/shell/command_volume_check_disk.go | 2
-rw-r--r--  weed/shell/command_volume_fsck.go | 93
-rw-r--r--  weed/shell/command_volume_grow.go | 64
-rw-r--r--  weed/shell/command_volume_server_evacuate.go | 7
-rw-r--r--  weed/shell/command_volume_tier_upload.go | 5
-rw-r--r--  weed/stats/metrics.go | 10
-rw-r--r--  weed/storage/disk_location.go | 10
-rw-r--r--  weed/storage/disk_location_test.go | 78
-rw-r--r--  weed/storage/needle/needle_parse_upload.go | 2
-rw-r--r--  weed/topology/data_node.go | 2
-rw-r--r--  weed/topology/store_replicate.go | 10
-rw-r--r--  weed/topology/topology.go | 67
-rw-r--r--  weed/topology/topology_event_handling.go | 5
-rw-r--r--  weed/topology/topology_info.go | 9
-rw-r--r--  weed/topology/volume_growth.go | 18
-rw-r--r--  weed/topology/volume_layout.go | 67
-rw-r--r--  weed/util/bytes.go | 6
-rw-r--r--  weed/util/config.go | 7
-rw-r--r--  weed/util/constants.go | 4
-rw-r--r--  weed/util/http/client/http_client.go | 201
-rw-r--r--  weed/util/http/client/http_client_interface.go | 16
-rw-r--r--  weed/util/http/client/http_client_name.go | 14
-rw-r--r--  weed/util/http/client/http_client_name_string.go | 23
-rw-r--r--  weed/util/http/client/http_client_opt.go | 18
-rw-r--r--  weed/util/http/http_global_client_init.go | 27
-rw-r--r--  weed/util/http/http_global_client_util.go (renamed from weed/util/http_util.go) | 71
-rw-r--r--  weed/util/queue.go | 41
-rw-r--r--  weed/util/queue_test.go | 22
-rw-r--r--  weed/weed.go | 2
190 files changed, 4416 insertions(+), 2042 deletions(-)
diff --git a/weed/command/backup.go b/weed/command/backup.go
index a8be4838e..f9b9fba64 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -66,7 +66,7 @@ var cmdBackup = &Command{
func runBackup(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
if *s.volumeId == -1 {
@@ -138,7 +138,9 @@ func runBackup(cmd *Command, args []string) bool {
if datSize > stats.TailOffset {
// remove the old data
- v.Destroy(false)
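+ // log the cleanup failure and continue: an empty volume is recreated below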
+ if err := v.Destroy(false); err != nil {
+ fmt.Printf("Error destroying volume: %v\n", err)
+ }
// recreate an empty volume
v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
if err != nil {
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 2a0db47c2..bc7ee1292 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -22,6 +22,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
type BenchmarkOptions struct {
@@ -111,7 +112,7 @@ var (
func runBenchmark(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
@@ -214,7 +215,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
if isSecure {
jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(context.Background()), b.grpcDialOption, df.fp.Fid)
}
- if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
+ if e := util_http.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
s.completed++
} else {
s.failed++
@@ -295,7 +296,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
}
var bytes []byte
for _, url := range urls {
- bytes, _, err = util.Get(url)
+ bytes, _, err = util_http.Get(url)
if err == nil {
break
}
diff --git a/weed/command/download.go b/weed/command/download.go
index 060be9f14..1b7098824 100644
--- a/weed/command/download.go
+++ b/weed/command/download.go
@@ -15,6 +15,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -47,7 +48,7 @@ var cmdDownload = &Command{
}
func runDownload(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for _, fid := range args {
@@ -63,11 +64,11 @@ func downloadToFile(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpti
if lookupError != nil {
return lookupError
}
- filename, _, rc, err := util.DownloadFile(fileUrl, jwt)
+ filename, _, rc, err := util_http.DownloadFile(fileUrl, jwt)
if err != nil {
return err
}
- defer util.CloseResponse(rc)
+ defer util_http.CloseResponse(rc)
if filename == "" {
filename = fileId
}
@@ -116,10 +117,10 @@ func fetchContent(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption
return "", nil, lookupError
}
var rc *http.Response
- if filename, _, rc, e = util.DownloadFile(fileUrl, jwt); e != nil {
+ if filename, _, rc, e = util_http.DownloadFile(fileUrl, jwt); e != nil {
return "", nil, e
}
- defer util.CloseResponse(rc)
+ defer util_http.CloseResponse(rc)
content, e = io.ReadAll(rc.Body)
return
}
diff --git a/weed/command/filer.go b/weed/command/filer.go
index 877c4b5d5..b7f67ea3b 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -1,6 +1,9 @@
package command
import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
"fmt"
"net"
"net/http"
@@ -10,8 +13,6 @@ import (
"strings"
"time"
- "google.golang.org/grpc/reflection"
-
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
@@ -20,6 +21,10 @@ import (
weed_server "github.com/seaweedfs/seaweedfs/weed/server"
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
+ "github.com/spf13/viper"
+ "google.golang.org/grpc/credentials/tls/certprovider"
+ "google.golang.org/grpc/credentials/tls/certprovider/pemfile"
+ "google.golang.org/grpc/reflection"
)
var (
@@ -52,6 +57,7 @@ type FilerOptions struct {
disableHttp *bool
cipher *bool
metricsHttpPort *int
+ metricsHttpIp *string
saveToFilerLimit *int
defaultLevelDbDirectory *string
concurrentUploadLimitMB *int
@@ -63,7 +69,7 @@ type FilerOptions struct {
diskType *string
allowedOrigins *string
exposeDirectoryData *bool
- joinExistingFiler *bool
+ certProvider certprovider.Provider
}
func init() {
@@ -85,6 +91,7 @@ func init() {
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ f.metricsHttpIp = cmdFiler.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
@@ -96,7 +103,6 @@ func init() {
f.diskType = cmdFiler.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
f.allowedOrigins = cmdFiler.Flag.String("allowedOrigins", "*", "comma separated list of allowed origins")
f.exposeDirectoryData = cmdFiler.Flag.Bool("exposeDirectoryData", true, "whether to return directory metadata and content in Filer UI")
- f.joinExistingFiler = cmdFiler.Flag.Bool("joinExistingFiler", false, "enable if new filer wants to join existing cluster")
// start s3 on filer
filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
@@ -124,6 +130,7 @@ func init() {
filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 0, "local cache capacity in MB")
+ filerWebDavOptions.maxMB = cmdFiler.Flag.Int("webdav.maxMB", 4, "split files larger than the limit")
filerWebDavOptions.filerRootPath = cmdFiler.Flag.String("webdav.filer.path", "/", "use this remote path from filer server")
// start iam on filer
@@ -172,9 +179,17 @@ func runFiler(cmd *Command, args []string) bool {
go http.ListenAndServe(fmt.Sprintf(":%d", *f.debugPort), nil)
}
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
- go stats_collect.StartMetricsServer(*f.bindIp, *f.metricsHttpPort)
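+ // pick the metrics listen IP: an explicit -metricsIp wins, then -ip.bind, then -ip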
+ switch {
+ case *f.metricsHttpIp != "":
+ // nothing to do, use f.metricsHttpIp
+ case *f.bindIp != "":
+ *f.metricsHttpIp = *f.bindIp
+ case *f.ip != "":
+ *f.metricsHttpIp = *f.ip
+ }
+ go stats_collect.StartMetricsServer(*f.metricsHttpIp, *f.metricsHttpPort)
filerAddress := pb.NewServerAddress(*f.ip, *f.port, *f.portGrpc).String()
startDelay := time.Duration(2)
@@ -222,6 +237,15 @@ func runFiler(cmd *Command, args []string) bool {
return true
}
+// GetCertificateWithUpdate Auto refreshing TLS certificate
+func (fo *FilerOptions) GetCertificateWithUpdate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+ certs, err := fo.certProvider.KeyMaterial(context.Background())
+ if certs == nil {
+ return nil, err
+ }
+ return &certs.Certs[0], err
+}
+
func (fo *FilerOptions) startFiler() {
defaultMux := http.NewServeMux()
@@ -264,7 +288,6 @@ func (fo *FilerOptions) startFiler() {
DownloadMaxBytesPs: int64(*fo.downloadMaxMBps) * 1024 * 1024,
DiskType: *fo.diskType,
AllowedOrigins: strings.Split(*fo.allowedOrigins, ","),
- JoinExistingFiler: *fo.joinExistingFiler,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
@@ -332,15 +355,62 @@ func (fo *FilerOptions) startFiler() {
httpS.Serve(filerSocketListener)
}()
}
- if filerLocalListener != nil {
- go func() {
- if err := httpS.Serve(filerLocalListener); err != nil {
- glog.Errorf("Filer Fail to serve: %v", e)
+
+ if viper.GetString("https.filer.key") != "" {
+ certFile := viper.GetString("https.filer.cert")
+ keyFile := viper.GetString("https.filer.key")
+ caCertFile := viper.GetString("https.filer.ca")
+ disableTlsVerifyClientCert := viper.GetBool("https.filer.disable_tls_verify_client_cert")
+
+ pemfileOptions := pemfile.Options{
+ CertFile: certFile,
+ KeyFile: keyFile,
+ RefreshDuration: security.CredRefreshingInterval,
+ }
+ if fo.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil {
+ glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err)
+ }
+
+ caCertPool := x509.NewCertPool()
+ if caCertFile != "" {
+ caCertFile, err := os.ReadFile(caCertFile)
+ if err != nil {
+ glog.Fatalf("error reading CA certificate: %v", err)
}
- }()
- }
- if err := httpS.Serve(filerListener); err != nil {
- glog.Fatalf("Filer Fail to serve: %v", e)
- }
+ caCertPool.AppendCertsFromPEM(caCertFile)
+ }
+
+ clientAuth := tls.NoClientCert
+ if !disableTlsVerifyClientCert {
+ clientAuth = tls.RequireAndVerifyClientCert
+ }
+
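+ // GetCertificate defers to the pemfile provider on each handshake, so renewed certificates are picked up without a restart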
+ httpS.TLSConfig = &tls.Config{
+ GetCertificate: fo.GetCertificateWithUpdate,
+ ClientAuth: clientAuth,
+ ClientCAs: caCertPool,
+ }
+ if filerLocalListener != nil {
+ go func() {
+ if err := httpS.ServeTLS(filerLocalListener, "", ""); err != nil {
+ glog.Errorf("Filer Fail to serve: %v", e)
+ }
+ }()
+ }
+ if err := httpS.ServeTLS(filerListener, "", ""); err != nil {
+ glog.Fatalf("Filer Fail to serve: %v", e)
+ }
+ } else {
+ if filerLocalListener != nil {
+ go func() {
+ if err := httpS.Serve(filerLocalListener); err != nil {
+ glog.Errorf("Filer Fail to serve: %v", e)
+ }
+ }()
+ }
+ if err := httpS.Serve(filerListener); err != nil {
+ glog.Fatalf("Filer Fail to serve: %v", e)
+ }
+ }
}
diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go
index 4aeab60f2..1344dfd2c 100644
--- a/weed/command/filer_backup.go
+++ b/weed/command/filer_backup.go
@@ -9,6 +9,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
"regexp"
+ "strings"
"time"
)
@@ -58,7 +59,7 @@ var cmdFilerBackup = &Command{
func runFilerBackup(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
util.LoadConfiguration("replication", true)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
@@ -148,17 +149,22 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
}()
}
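+ // ensure a trailing "/" so plain prefix matching stays within the directory (e.g. "/data" would otherwise also match "/database")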
+ prefix := sourcePath
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: "backup_" + dataSink.GetName(),
ClientId: clientId,
ClientEpoch: clientEpoch,
SelfSignature: 0,
- PathPrefix: sourcePath,
+ PathPrefix: prefix,
AdditionalPathPrefixes: nil,
DirectoriesToWatch: nil,
StartTsNs: startFrom.UnixNano(),
StopTsNs: 0,
- EventErrorType: pb.TrivialOnError,
+ EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(sourceFiler, grpcDialOption, metadataFollowOption, processEventFnWithOffset)
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
index 2ef3bfc33..ba3625b0d 100644
--- a/weed/command/filer_cat.go
+++ b/weed/command/filer_cat.go
@@ -59,7 +59,7 @@ var cmdFilerCat = &Command{
func runFilerCat(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
if len(args) == 0 {
return false
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index df5e002c5..0342aa585 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -83,7 +83,7 @@ var cmdFilerCopy = &Command{
func runCopy(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
if len(args) <= 1 {
return false
@@ -344,7 +344,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
return err
}
- finalFileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
+ uploader, uploaderErr := operation.NewUploader()
+ if uploaderErr != nil {
+ return uploaderErr
+ }
+
+ finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
worker,
&filer_pb.AssignVolumeRequest{
Count: 1,
@@ -423,7 +428,13 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
<-concurrentChunks
}()
- fileId, uploadResult, err, _ := operation.UploadWithRetry(
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ uploadError = fmt.Errorf("upload data %v: %v\n", fileName, err)
+ return
+ }
+
+ fileId, uploadResult, err, _ := uploader.UploadWithRetry(
worker,
&filer_pb.AssignVolumeRequest{
Count: 1,
@@ -472,7 +483,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
- operation.DeleteFiles(func(_ context.Context) pb.ServerAddress {
+ operation.DeleteFileIds(func(_ context.Context) pb.ServerAddress {
return pb.ServerAddress(copy.masters[0])
}, false, worker.options.grpcDialOption, fileIds)
return uploadError
@@ -535,8 +546,12 @@ func detectMimeType(f *os.File) string {
}
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
+ uploader, uploaderErr := operation.NewUploader()
+ if uploaderErr != nil {
+ return nil, fmt.Errorf("upload data: %v", uploaderErr)
+ }
- finalFileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
+ finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
worker,
&filer_pb.AssignVolumeRequest{
Count: 1,
diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go
index ff4a61e41..e8c4680ba 100644
--- a/weed/command/filer_meta_backup.go
+++ b/weed/command/filer_meta_backup.go
@@ -8,6 +8,7 @@ import (
"github.com/spf13/viper"
"google.golang.org/grpc"
"reflect"
+ "strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb"
@@ -55,7 +56,7 @@ The backup writes to another filer store specified in a backup_filer.toml.
func runFilerMetaBackup(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
// load backup_filer.toml
@@ -63,9 +64,9 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
v.SetConfigFile(*metaBackup.backupFilerConfig)
if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file
- glog.Fatalf("Failed to load %s file.\nPlease use this command to generate the a %s.toml file\n"+
+ glog.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+
" weed scaffold -config=%s -output=.\n\n\n",
- *metaBackup.backupFilerConfig, "backup_filer", "filer")
+ *metaBackup.backupFilerConfig, err, "backup_filer", "filer")
}
if err := metaBackup.initStore(v); err != nil {
@@ -197,17 +198,21 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
metaBackup.clientEpoch++
+ prefix := *metaBackup.filerDirectory
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: "meta_backup",
ClientId: metaBackup.clientId,
ClientEpoch: metaBackup.clientEpoch,
SelfSignature: 0,
- PathPrefix: *metaBackup.filerDirectory,
+ PathPrefix: prefix,
AdditionalPathPrefixes: nil,
DirectoriesToWatch: nil,
StartTsNs: startTime.UnixNano(),
StopTsNs: 0,
- EventErrorType: pb.TrivialOnError,
+ EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, metadataFollowOption, processEventFnWithOffset)
diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go
index 32855072b..d7a169535 100644
--- a/weed/command/filer_meta_tail.go
+++ b/weed/command/filer_meta_tail.go
@@ -45,7 +45,7 @@ var (
func runFilerMetaTail(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
clientId := util.RandomInt32()
diff --git a/weed/command/filer_remote_gateway.go b/weed/command/filer_remote_gateway.go
index 61a5d26a2..78357cc04 100644
--- a/weed/command/filer_remote_gateway.go
+++ b/weed/command/filer_remote_gateway.go
@@ -78,7 +78,7 @@ var cmdFilerRemoteGateway = &Command{
func runFilerRemoteGateway(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
remoteGatewayOptions.grpcDialOption = grpcDialOption
diff --git a/weed/command/filer_remote_gateway_buckets.go b/weed/command/filer_remote_gateway_buckets.go
index 9bb59dabb..f6fe9a99c 100644
--- a/weed/command/filer_remote_gateway_buckets.go
+++ b/weed/command/filer_remote_gateway_buckets.go
@@ -55,12 +55,12 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo
ClientId: option.clientId,
ClientEpoch: option.clientEpoch,
SelfSignature: 0,
- PathPrefix: option.bucketsDir,
+ PathPrefix: option.bucketsDir + "/",
AdditionalPathPrefixes: []string{filer.DirectoryEtcRemote},
DirectoriesToWatch: nil,
StartTsNs: lastOffsetTs.UnixNano(),
StopTsNs: 0,
- EventErrorType: pb.TrivialOnError,
+ EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset)
diff --git a/weed/command/filer_remote_sync.go b/weed/command/filer_remote_sync.go
index 2d6133367..77dd95134 100644
--- a/weed/command/filer_remote_sync.go
+++ b/weed/command/filer_remote_sync.go
@@ -73,7 +73,7 @@ var cmdFilerRemoteSynchronize = &Command{
func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
remoteSyncOptions.grpcDialOption = grpcDialOption
diff --git a/weed/command/filer_remote_sync_dir.go b/weed/command/filer_remote_sync_dir.go
index 76f7e46d5..186523e45 100644
--- a/weed/command/filer_remote_sync_dir.go
+++ b/weed/command/filer_remote_sync_dir.go
@@ -64,17 +64,22 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
option.clientEpoch++
+ prefix := mountedDir
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: "filer.remote.sync",
ClientId: option.clientId,
ClientEpoch: option.clientEpoch,
SelfSignature: 0,
- PathPrefix: mountedDir,
+ PathPrefix: prefix,
AdditionalPathPrefixes: []string{filer.DirectoryEtcRemote},
DirectoriesToWatch: nil,
StartTsNs: lastOffsetTs.UnixNano(),
StopTsNs: 0,
- EventErrorType: pb.TrivialOnError,
+ EventErrorType: pb.RetryForeverOnError,
}
return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset)
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index 4fca8158a..f53fdfb48 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -30,7 +30,7 @@ var cmdFilerReplicate = &Command{
func runFilerReplicate(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
util.LoadConfiguration("replication", true)
util.LoadConfiguration("notification", true)
config := util.GetViper()
diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go
index 9ad76e31b..90204af4a 100644
--- a/weed/command/filer_sync.go
+++ b/weed/command/filer_sync.go
@@ -118,7 +118,7 @@ var cmdFilerSynchronize = &Command{
func runFilerSynchronize(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
@@ -296,12 +296,17 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti
return setOffset(grpcDialOption, targetFiler, getSignaturePrefixByPath(sourcePath), sourceFilerSignature, offsetTsNs)
})
+ prefix := sourcePath
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: clientName,
ClientId: clientId,
ClientEpoch: clientEpoch,
SelfSignature: targetFilerSignature,
- PathPrefix: sourcePath,
+ PathPrefix: prefix,
AdditionalPathPrefixes: nil,
DirectoriesToWatch: nil,
StartTsNs: sourceFilerOffsetTsNs,
@@ -469,14 +474,14 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str
}
} else {
- // new key is outside of the watched directory
+ // new key is outside the watched directory
if doDeleteFiles {
key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath)
return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
}
}
} else {
- // old key is outside of the watched directory
+ // old key is outside the watched directory
if strings.HasPrefix(string(sourceNewKey), sourcePath) {
// new key is in the watched directory
key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath)
@@ -486,7 +491,7 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str
return nil
}
} else {
- // new key is also outside of the watched directory
+ // new key is also outside the watched directory
// skip
}
}
diff --git a/weed/command/iam.go b/weed/command/iam.go
index 95964994f..fa21803dd 100644
--- a/weed/command/iam.go
+++ b/weed/command/iam.go
@@ -47,7 +47,7 @@ func runIam(cmd *Command, args []string) bool {
func (iamopt *IamOptions) startIamServer() bool {
filerAddress := pb.ServerAddress(*iamopt.filer)
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for {
err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
diff --git a/weed/command/master.go b/weed/command/master.go
index f80d8faeb..914853d88 100644
--- a/weed/command/master.go
+++ b/weed/command/master.go
@@ -53,6 +53,7 @@ type MasterOptions struct {
metricsIntervalSec *int
raftResumeState *bool
metricsHttpPort *int
+ metricsHttpIp *string
heartbeatInterval *time.Duration
electionTimeout *time.Duration
raftHashicorp *bool
@@ -77,6 +78,7 @@ func init() {
m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address <host>:<port>")
m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
m.metricsHttpPort = cmdMaster.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ m.metricsHttpIp = cmdMaster.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
m.raftResumeState = cmdMaster.Flag.Bool("resumeState", false, "resume previous state on start master server")
m.heartbeatInterval = cmdMaster.Flag.Duration("heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)")
m.electionTimeout = cmdMaster.Flag.Duration("electionTimeout", 10*time.Second, "election timeout of master servers")
@@ -103,7 +105,7 @@ var (
func runMaster(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
util.LoadConfiguration("master", false)
grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
@@ -121,7 +123,15 @@ func runMaster(cmd *Command, args []string) bool {
glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
}
- go stats_collect.StartMetricsServer(*m.ipBind, *m.metricsHttpPort)
+ switch {
+ case *m.metricsHttpIp != "":
+ // nothing to do, use m.metricsHttpIp
+ case *m.ipBind != "":
+ *m.metricsHttpIp = *m.ipBind
+ case *m.ip != "":
+ *m.metricsHttpIp = *m.ip
+ }
+ go stats_collect.StartMetricsServer(*m.metricsHttpIp, *m.metricsHttpPort)
startMaster(m, masterWhiteList)
return true
@@ -180,10 +190,10 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
}
ms.SetRaftServer(raftServer)
- r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
- r.HandleFunc("/cluster/healthz", raftServer.HealthzHandler).Methods("GET", "HEAD")
+ r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods(http.MethodGet)
+ r.HandleFunc("/cluster/healthz", raftServer.HealthzHandler).Methods(http.MethodGet, http.MethodHead)
if *masterOption.raftHashicorp {
- r.HandleFunc("/raft/stats", raftServer.StatsRaftHandler).Methods("GET")
+ r.HandleFunc("/raft/stats", raftServer.StatsRaftHandler).Methods(http.MethodGet)
}
// starting grpc server
grpcPort := *masterOption.portGrpc
diff --git a/weed/command/master_follower.go b/weed/command/master_follower.go
index 7217aff0b..504ddb6c3 100644
--- a/weed/command/master_follower.go
+++ b/weed/command/master_follower.go
@@ -68,7 +68,7 @@ var cmdMasterFollower = &Command{
func runMasterFollower(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
util.LoadConfiguration("master", false)
if *mf.portGrpc == 0 {
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 742c38180..a5325b11e 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -66,7 +66,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// try to connect to filer
filerAddresses := pb.ServerAddresses(*option.filer).ToAddresses()
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
var cipher bool
var err error
diff --git a/weed/command/mq_broker.go b/weed/command/mq_broker.go
index e093ebc56..5eb304204 100644
--- a/weed/command/mq_broker.go
+++ b/weed/command/mq_broker.go
@@ -54,7 +54,7 @@ var cmdMqBroker = &Command{
func runMqBroker(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
mqBrokerStandaloneOptions.masters = pb.ServerAddresses(*mqBrokerStandaloneOptions.mastersString).ToAddressMap()
diff --git a/weed/command/s3.go b/weed/command/s3.go
index b7bb2a546..e568de91b 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -49,6 +49,7 @@ type S3Options struct {
tlsCACertificate *string
tlsVerifyClientCert *bool
metricsHttpPort *int
+ metricsHttpIp *string
allowEmptyFolder *bool
allowDeleteBucketNotEmpty *bool
auditLogConfig *string
@@ -75,6 +76,7 @@ func init() {
s3StandaloneOptions.tlsCACertificate = cmdS3.Flag.String("cacert.file", "", "path to the TLS CA certificate file")
s3StandaloneOptions.tlsVerifyClientCert = cmdS3.Flag.Bool("tlsVerifyClientCert", false, "whether to verify the client's certificate")
s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ s3StandaloneOptions.metricsHttpIp = cmdS3.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", true, "allow empty folders")
s3StandaloneOptions.allowDeleteBucketNotEmpty = cmdS3.Flag.Bool("allowDeleteBucketNotEmpty", true, "allow recursive deleting all entries along with bucket")
s3StandaloneOptions.localFilerSocket = cmdS3.Flag.String("localFilerSocket", "", "local filer socket path")
@@ -163,17 +165,26 @@ var cmdS3 = &Command{
func runS3(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
- go stats_collect.StartMetricsServer(*s3StandaloneOptions.bindIp, *s3StandaloneOptions.metricsHttpPort)
+ switch {
+ case *s3StandaloneOptions.metricsHttpIp != "":
+ // nothing to do, use s3StandaloneOptions.metricsHttpIp
+ case *s3StandaloneOptions.bindIp != "":
+ *s3StandaloneOptions.metricsHttpIp = *s3StandaloneOptions.bindIp
+ }
+ go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpIp, *s3StandaloneOptions.metricsHttpPort)
return s3StandaloneOptions.startS3Server()
}
// GetCertificateWithUpdate Auto refreshing TLS certificate
-func (S3opt *S3Options) GetCertificateWithUpdate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
- certs, err := S3opt.certProvider.KeyMaterial(context.Background())
+func (s3opt *S3Options) GetCertificateWithUpdate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+ certs, err := s3opt.certProvider.KeyMaterial(context.Background())
+ if certs == nil {
+ return nil, err
+ }
return &certs.Certs[0], err
}
@@ -320,6 +331,10 @@ func (s3opt *S3Options) startS3Server() bool {
ClientAuth: clientAuth,
ClientCAs: caCertPool,
}
+ err = security.FixTlsConfig(util.GetViper(), httpS.TLSConfig)
+ if err != nil {
+ glog.Fatalf("error with tls config: %v", err)
+ }
if *s3opt.portHttps == 0 {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml
index 574125207..728aecb53 100644
--- a/weed/command/scaffold/filer.toml
+++ b/weed/command/scaffold/filer.toml
@@ -285,7 +285,7 @@ password = ""
ssl = false
ssl_ca_file = ""
ssl_cert_file = ""
-ssl_key_file = "
+ssl_key_file = ""
insecure_skip_verify = false
option_pool_size = 0
database = "seaweedfs"
diff --git a/weed/command/scaffold/security.toml b/weed/command/scaffold/security.toml
index c5b2a563c..113e5b016 100644
--- a/weed/command/scaffold/security.toml
+++ b/weed/command/scaffold/security.toml
@@ -95,18 +95,29 @@ allowed_commonNames = "" # comma-separated SSL certificate common names
cert = ""
key = ""
-# volume server https options
-# Note: work in progress!
-# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
+# https client for master|volume|filer|etc connection
+# The corresponding server sections [https.volume]|[https.master]|[https.filer] must also be set
[https.client]
enabled = true
+cert = ""
+key = ""
+ca = ""
+# volume server https options
[https.volume]
cert = ""
key = ""
ca = ""
+# master server https options
[https.master]
cert = ""
key = ""
ca = ""
+
+# filer server https options
+[https.filer]
+cert = ""
+key = ""
+ca = ""
+# disable_tls_verify_client_cert = true|false (default: false)
diff --git a/weed/command/server.go b/weed/command/server.go
index 503927629..ddcaf1f7e 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -66,6 +66,7 @@ var (
volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly (deprecated, use minFreeSpace instead).")
volumeMinFreeSpace = cmdServer.Flag.String("volume.minFreeSpace", "", "min free disk space (value<=100 as percentage like 1, other as human readable bytes, like 10GiB). Low disk space will mark all volumes as ReadOnly.")
serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ serverMetricsHttpIp = cmdServer.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
// pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server")
@@ -97,7 +98,7 @@ func init() {
masterOptions.metricsIntervalSec = cmdServer.Flag.Int("master.metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
masterOptions.raftResumeState = cmdServer.Flag.Bool("master.resumeState", false, "resume previous state on start master server")
masterOptions.raftHashicorp = cmdServer.Flag.Bool("master.raftHashicorp", false, "use hashicorp raft")
- masterOptions.raftBootstrap = cmdMaster.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
+ masterOptions.raftBootstrap = cmdServer.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
masterOptions.heartbeatInterval = cmdServer.Flag.Duration("master.heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)")
masterOptions.electionTimeout = cmdServer.Flag.Duration("master.electionTimeout", 10*time.Second, "election timeout of master servers")
@@ -119,7 +120,6 @@ func init() {
filerOptions.downloadMaxMBps = cmdServer.Flag.Int("filer.downloadMaxMBps", 0, "download max speed for each download request, in MB per second")
filerOptions.diskType = cmdServer.Flag.String("filer.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
filerOptions.exposeDirectoryData = cmdServer.Flag.Bool("filer.exposeDirectoryData", true, "expose directory data via filer. If false, filer UI will be inaccessible.")
- filerOptions.joinExistingFiler = cmdServer.Flag.Bool("filer.joinExistingFiler", false, "enable if new filer wants to join existing cluster")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.portGrpc = cmdServer.Flag.Int("volume.port.grpc", 0, "volume server grpc listen port")
@@ -179,7 +179,7 @@ func runServer(cmd *Command, args []string) bool {
go http.ListenAndServe(fmt.Sprintf(":%d", *serverOptions.debugPort), nil)
}
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
util.LoadConfiguration("master", false)
grace.SetupProfiling(*serverOptions.cpuprofile, *serverOptions.memprofile)
@@ -207,6 +207,10 @@ func runServer(cmd *Command, args []string) bool {
serverBindIp = serverIp
}
+ if *serverMetricsHttpIp == "" {
+ *serverMetricsHttpIp = *serverBindIp
+ }
+
// ip address
masterOptions.ip = serverIp
masterOptions.ipBind = serverBindIp
@@ -245,7 +249,7 @@ func runServer(cmd *Command, args []string) bool {
webdavOptions.filer = &filerAddress
mqBrokerOptions.filerGroup = filerOptions.filerGroup
- go stats_collect.StartMetricsServer(*serverBindIp, *serverMetricsHttpPort)
+ go stats_collect.StartMetricsServer(*serverMetricsHttpIp, *serverMetricsHttpPort)
folders := strings.Split(*volumeDataFolders, ",")
diff --git a/weed/command/shell.go b/weed/command/shell.go
index f78ba89fc..1e921411b 100644
--- a/weed/command/shell.go
+++ b/weed/command/shell.go
@@ -35,7 +35,7 @@ var cmdShell = &Command{
func runShell(command *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
shellOptions.Directory = "/"
diff --git a/weed/command/update.go b/weed/command/update.go
index 314a903f2..bf871d654 100644
--- a/weed/command/update.go
+++ b/weed/command/update.go
@@ -20,6 +20,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"golang.org/x/net/context/ctxhttp"
)
@@ -198,7 +199,7 @@ func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (R
if err != nil {
return Release{}, err
}
- defer util.CloseResponse(res)
+ defer util_http.CloseResponse(res)
if res.StatusCode != http.StatusOK {
content := res.Header.Get("Content-Type")
@@ -258,7 +259,7 @@ func getGithubData(ctx context.Context, url string) ([]byte, error) {
if err != nil {
return nil, err
}
- defer util.CloseResponse(res)
+ defer util_http.CloseResponse(res)
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)
@@ -308,7 +309,12 @@ func extractToFile(buf []byte, filename, target string) error {
trd := tar.NewReader(gr)
hdr, terr := trd.Next()
if terr != nil {
- glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr)
+ if hdr != nil {
+ glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr)
+ } else {
+ glog.Errorf("uncompress file is nil, failed:%s", terr)
+ }
+
return terr
}
rd = trd
diff --git a/weed/command/upload.go b/weed/command/upload.go
index 3e6b8f9a2..7135a707a 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -69,7 +69,7 @@ var cmdUpload = &Command{
func runUpload(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
defaultReplication, err := readMasterConfiguration(grpcDialOption, pb.ServerAddress(*upload.master))
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 852989d1f..1078d8d6c 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -65,6 +65,7 @@ type VolumeServerOptions struct {
pprof *bool
preStopSeconds *int
metricsHttpPort *int
+ metricsHttpIp *string
// pulseSeconds *int
inflightUploadDataTimeout *time.Duration
hasSlowRead *bool
@@ -99,6 +100,7 @@ func init() {
v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size")
v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+ v.metricsHttpIp = cmdVolume.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.")
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", true, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
@@ -123,7 +125,7 @@ var (
func runVolume(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
// If --pprof is set we assume the caller wants to be able to collect
// cpu and memory profiles via go tool pprof
@@ -131,7 +133,15 @@ func runVolume(cmd *Command, args []string) bool {
grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
}
- go stats_collect.StartMetricsServer(*v.bindIp, *v.metricsHttpPort)
+ switch {
+ case *v.metricsHttpIp != "":
+ // nothing to do, use v.metricsHttpIp
+ case *v.bindIp != "":
+ *v.metricsHttpIp = *v.bindIp
+ case *v.ip != "":
+ *v.metricsHttpIp = *v.ip
+ }
+ go stats_collect.StartMetricsServer(*v.metricsHttpIp, *v.metricsHttpPort)
minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
v.masters = pb.ServerAddresses(*v.mastersString).ToAddresses()
diff --git a/weed/command/webdav.go b/weed/command/webdav.go
index f0e738f4a..1d1a43eda 100644
--- a/weed/command/webdav.go
+++ b/weed/command/webdav.go
@@ -60,7 +60,7 @@ var cmdWebDav = &Command{
func runWebDav(cmd *Command, args []string) bool {
- util.LoadConfiguration("security", false)
+ util.LoadSecurityConfiguration()
glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
index ee2afa30f..1d175651d 100644
--- a/weed/filer/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -7,6 +7,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
"sync"
@@ -140,6 +141,8 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.Full
}
}
+ } else {
+ err = fmt.Errorf("invalid bucket name %s", bucket)
}
return
@@ -340,6 +343,9 @@ func (store *AbstractSqlStore) Shutdown() {
}
func isValidBucket(bucket string) bool {
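+ // reject anything that is not a valid S3 bucket name before it is used to pick a table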
+ if s3bucket.VerifyS3BucketName(bucket) != nil {
+ return false
+ }
return bucket != DEFAULT_TABLE && bucket != ""
}
diff --git a/weed/filer/filechunk_group.go b/weed/filer/filechunk_group.go
index c89527710..dbd08b42b 100644
--- a/weed/filer/filechunk_group.go
+++ b/weed/filer/filechunk_group.go
@@ -1,10 +1,11 @@
package filer
import (
+ "sync"
+
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
- "sync"
)
type ChunkGroup struct {
@@ -54,9 +55,11 @@ func (group *ChunkGroup) ReadDataAt(fileSize int64, buff []byte, offset int64) (
section, found := group.sections[si]
rangeStart, rangeStop := max(offset, int64(si*SectionSize)), min(offset+int64(len(buff)), int64((si+1)*SectionSize))
if !found {
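+ // a missing section reads as zeros, but only up to fileSize; the zero-filled bytes still count toward n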
+ rangeStop = min(rangeStop, fileSize)
for i := rangeStart; i < rangeStop; i++ {
buff[i-offset] = 0
}
+ n = int(int64(n) + rangeStop - rangeStart)
continue
}
xn, xTsNs, xErr := section.readDataAt(group, fileSize, buff[rangeStart-offset:rangeStop-offset], rangeStart)
diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go
index 7ea2f0353..e9ae1800c 100644
--- a/weed/filer/filechunk_manifest.go
+++ b/weed/filer/filechunk_manifest.go
@@ -15,6 +15,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
const (
@@ -120,7 +121,7 @@ func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunction
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return 0, err
}
- return util.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
+ return util_http.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
}
func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
@@ -132,7 +133,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
for _, urlString := range urlStrings {
var localProcessed int
var writeErr error
- shouldRetry, err = util.ReadUrlAsStreamAuthenticated(urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+ shouldRetry, err = util_http.ReadUrlAsStreamAuthenticated(urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
if totalWritten > localProcessed {
toBeSkipped := totalWritten - localProcessed
if len(data) <= toBeSkipped {
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
index 016bfc8fa..80be0b88e 100644
--- a/weed/filer/filer.go
+++ b/weed/filer/filer.go
@@ -78,7 +78,7 @@ func NewFiler(masters pb.ServerDiscovery, grpcDialOption grpc.DialOption, filerH
return f
}
-func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) {
+func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) {
if len(existingNodes) == 0 {
return
}
@@ -91,25 +91,13 @@ func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*
}
glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId)
- f.UniqueFilerEpoch++
-
- metadataFollowOption := &pb.MetadataFollowOption{
- ClientName: "bootstrap",
- ClientId: f.UniqueFilerId,
- ClientEpoch: f.UniqueFilerEpoch,
- SelfSignature: f.Signature,
- PathPrefix: "/",
- AdditionalPathPrefixes: nil,
- DirectoriesToWatch: nil,
- StartTsNs: 0,
- StopTsNs: snapshotTime.UnixNano(),
- EventErrorType: pb.FatalOnError,
- }
- err = pb.FollowMetadata(pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, metadataFollowOption, func(resp *filer_pb.SubscribeMetadataResponse) error {
- return Replay(f.Store, resp)
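+ // instead of replaying the metadata log, stream a breadth-first walk of the peer's tree and insert each entry locally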
+ return pb.WithFilerClient(false, f.UniqueFilerId, pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ return filer_pb.StreamBfs(client, "/", snapshotTime.UnixNano(), func(parentPath util.FullPath, entry *filer_pb.Entry) error {
+ return f.Store.InsertEntry(context.Background(), FromPbEntry(string(parentPath), entry))
+ })
})
- return
+
}
func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, startFrom time.Time) {
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
index 25bf83771..69755a16a 100644
--- a/weed/filer/filer_conf.go
+++ b/weed/filer/filer_conf.go
@@ -102,7 +102,7 @@ func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
for _, location := range conf.Locations {
- err = fc.AddLocationConf(location)
+ err = fc.SetLocationConf(location)
if err != nil {
// this is not recoverable
return nil
@@ -111,7 +111,24 @@ func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
return nil
}
+func (fc *FilerConf) GetLocationConf(locationPrefix string) (locConf *filer_pb.FilerConf_PathConf, found bool) {
+ return fc.rules.Get([]byte(locationPrefix))
+}
+
+func (fc *FilerConf) SetLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
+ err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
+ if err != nil {
+ glog.Errorf("put location prefix: %v", err)
+ }
+ return
+}
+
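+// AddLocationConf merges into any existing conf for the same location prefix, while SetLocationConf replaces it outright.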
func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
+ existingConf, found := fc.rules.Get([]byte(locConf.LocationPrefix))
+ if found {
+ mergePathConf(existingConf, locConf)
+ locConf = existingConf
+ }
err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
if err != nil {
glog.Errorf("put location prefix: %v", err)
@@ -170,6 +187,7 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
a.DataCenter = util.Nvl(b.DataCenter, a.DataCenter)
a.Rack = util.Nvl(b.Rack, a.Rack)
a.DataNode = util.Nvl(b.DataNode, a.DataNode)
+ a.DisableChunkDeletion = b.DisableChunkDeletion || a.DisableChunkDeletion
}
func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index 64546940e..0ae421981 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -17,7 +17,7 @@ const (
type OnChunksFunc func([]*filer_pb.FileChunk) error
type OnHardLinkIdsFunc func([]HardLinkId) error
-func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) {
+func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32, ifNotModifiedAfter int64) (err error) {
if p == "/" {
return nil
}
@@ -26,6 +26,9 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
if findErr != nil {
return findErr
}
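+ // skip the deletion when the entry was modified after the given cutoff; a zero cutoff disables this check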
+ if ifNotModifiedAfter > 0 && entry.Attr.Mtime.Unix() > ifNotModifiedAfter {
+ return nil
+ }
isDeleteCollection := f.isBucket(entry)
if entry.IsDirectory() {
// delete the folder children, not including the folder itself
@@ -50,12 +53,12 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
}
if shouldDeleteChunks && !isDeleteCollection {
- f.DirectDeleteChunks(entry.GetChunks())
+ f.DeleteChunks(p, entry.GetChunks())
}
if isDeleteCollection {
collectionName := entry.Name()
- f.doDeleteCollection(collectionName)
+ f.DoDeleteCollection(collectionName)
}
return nil
@@ -133,7 +136,7 @@ func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shou
return nil
}
-func (f *Filer) doDeleteCollection(collectionName string) (err error) {
+func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
return f.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
_, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
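The added ifNotModifiedAfter parameter makes the delete conditional: 0 preserves the old unconditional behavior, and a Unix-seconds timestamp skips entries whose mtime is later. A hedged caller sketch, assuming f is the *Filer from this file (path and cutoff are hypothetical):

    // delete only if the entry has not been modified in the last hour
    cutoff := time.Now().Add(-time.Hour).Unix()
    err := f.DeleteEntryMetaAndData(ctx, util.FullPath("/buckets/x/obj"),
        true,  // isRecursive
        false, // ignoreRecursiveError
        true,  // shouldDeleteChunks
        false, // isFromOtherCluster
        nil,   // signatures
        cutoff)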
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
index 84c5ed4c0..362c7c51b 100644
--- a/weed/filer/filer_deletion.go
+++ b/weed/filer/filer_deletion.go
@@ -1,11 +1,12 @@
package filer
import (
- "github.com/seaweedfs/seaweedfs/weed/storage"
- "github.com/seaweedfs/seaweedfs/weed/util"
"strings"
"time"
+ "github.com/seaweedfs/seaweedfs/weed/storage"
+ "github.com/seaweedfs/seaweedfs/weed/util"
+
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -22,6 +23,7 @@ func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []str
locations = append(locations, operation.Location{
Url: loc.Url,
PublicUrl: loc.PublicUrl,
+ GrpcPort: loc.GrpcPort,
})
}
m[vid] = &operation.LookupResult{
@@ -53,7 +55,7 @@ func (f *Filer) loopProcessingDeletion() {
fileIds = fileIds[:0]
}
deletionCount = len(toDeleteFileIds)
- _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+ _, err := operation.DeleteFileIdsWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
if err != nil {
if !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) {
glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
@@ -70,50 +72,6 @@ func (f *Filer) loopProcessingDeletion() {
}
}
-func (f *Filer) doDeleteFileIds(fileIds []string) {
-
- lookupFunc := LookupByMasterClientFn(f.MasterClient)
- DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
-
- for len(fileIds) > 0 {
- var toDeleteFileIds []string
- if len(fileIds) > DeletionBatchSize {
- toDeleteFileIds = fileIds[:DeletionBatchSize]
- fileIds = fileIds[DeletionBatchSize:]
- } else {
- toDeleteFileIds = fileIds
- fileIds = fileIds[:0]
- }
- deletionCount := len(toDeleteFileIds)
- _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
- if err != nil {
- if !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) {
- glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
- }
- }
- }
-}
-
-func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
- var fileIdsToDelete []string
- for _, chunk := range chunks {
- if !chunk.IsChunkManifest {
- fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
- continue
- }
- dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
- if manifestResolveErr != nil {
- glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
- }
- for _, dChunk := range dataChunks {
- fileIdsToDelete = append(fileIdsToDelete, dChunk.GetFileIdString())
- }
- fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
- }
-
- f.doDeleteFileIds(fileIdsToDelete)
-}
-
func (f *Filer) DeleteUncommittedChunks(chunks []*filer_pb.FileChunk) {
f.doDeleteChunks(chunks)
}
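With DirectDeleteChunks removed, all chunk deletion funnels through the queued path, and the renamed DeleteFileIdsWithLookupVolumeId is the single batch-delete entry point. A sketch of how the pieces fit together (the file id is hypothetical):

    // the lookup now carries GrpcPort through to each volume location
    lookupFunc := filer.LookupByMasterClientFn(f.MasterClient)
    results, err := operation.DeleteFileIdsWithLookupVolumeId(
        f.GrpcDialOption, []string{"3,01637037d6"}, lookupFunc)
    if err != nil {
        glog.V(0).Infof("batch delete: %v", err)
    }
    for _, r := range results {
        glog.V(4).Infof("deleted %s: status %d", r.FileId, r.Status)
    }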
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index db953d398..621d4c227 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -5,7 +5,6 @@ import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"io"
- "math"
"regexp"
"strings"
"time"
@@ -116,101 +115,34 @@ var (
func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastTsNs int64, isDone bool, err error) {
- startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Year(), startPosition.Month(), startPosition.Day())
- startHourMinute := fmt.Sprintf("%02d-%02d", startPosition.Hour(), startPosition.Minute())
- var stopDate, stopHourMinute string
- if stopTsNs != 0 {
- stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC()
- stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day())
- stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute())
- }
-
- sizeBuf := make([]byte, 4)
- startTsNs := startPosition.UnixNano()
-
- dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
- if listDayErr != nil {
- return lastTsNs, isDone, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ visitor, visitErr := f.collectPersistedLogBuffer(startPosition, stopTsNs)
+ if visitErr != nil {
+ if visitErr == io.EOF {
+ return
+ }
+ err = fmt.Errorf("reading from persisted logs: %v", visitErr)
+ return
}
- for _, dayEntry := range dayEntries {
- if stopDate != "" {
- if strings.Compare(dayEntry.Name(), stopDate) > 0 {
+ var logEntry *filer_pb.LogEntry
+ for {
+ logEntry, visitErr = visitor.GetNext()
+ if visitErr != nil {
+ if visitErr == io.EOF {
break
}
+ err = fmt.Errorf("read next from persisted logs: %v", visitErr)
+ return
}
- // println("checking day", dayEntry.FullPath)
- hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
- if listHourMinuteErr != nil {
- return lastTsNs, isDone, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+ isDone, visitErr = eachLogEntryFn(logEntry)
+ if visitErr != nil {
+ err = fmt.Errorf("process persisted log entry: %v", visitErr)
+ return
}
- for _, hourMinuteEntry := range hourMinuteEntries {
- // println("checking hh-mm", hourMinuteEntry.FullPath)
- if dayEntry.Name() == startDate {
- hourMinute := util.FileNameBase(hourMinuteEntry.Name())
- if strings.Compare(hourMinute, startHourMinute) < 0 {
- continue
- }
- }
- if dayEntry.Name() == stopDate {
- hourMinute := util.FileNameBase(hourMinuteEntry.Name())
- if strings.Compare(hourMinute, stopHourMinute) > 0 {
- break
- }
- }
- // println("processing", hourMinuteEntry.FullPath)
- chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
- if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
- chunkedFileReader.Close()
- if err == io.EOF {
- continue
- }
- if VolumeNotFoundPattern.MatchString(err.Error()) {
- glog.Warningf("skipping reading %s: %v", hourMinuteEntry.FullPath, err)
- continue
- }
- return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
- }
- chunkedFileReader.Close()
+ lastTsNs = logEntry.TsNs
+ if isDone {
+ return
}
}
- return lastTsNs, isDone, nil
-}
-
-func ReadEachLogEntry(r io.Reader, sizeBuf []byte, startTsNs, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastTsNs int64, err error) {
- for {
- n, err := r.Read(sizeBuf)
- if err != nil {
- return lastTsNs, err
- }
- if n != 4 {
- return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
- }
- size := util.BytesToUint32(sizeBuf)
- // println("entry size", size)
- entryData := make([]byte, size)
- n, err = r.Read(entryData)
- if err != nil {
- return lastTsNs, err
- }
- if n != int(size) {
- return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
- }
- logEntry := &filer_pb.LogEntry{}
- if err = proto.Unmarshal(entryData, logEntry); err != nil {
- return lastTsNs, err
- }
- if logEntry.TsNs <= startTsNs {
- continue
- }
- if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
- return lastTsNs, err
- }
- // println("each log: ", logEntry.TsNs)
- if _, err := eachLogEntryFn(logEntry); err != nil {
- return lastTsNs, err
- } else {
- lastTsNs = logEntry.TsNs
- }
- }
+ return
}
diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go
index 66ce24871..3c9a3496c 100644
--- a/weed/filer/filer_notify_append.go
+++ b/weed/filer/filer_notify_append.go
@@ -77,7 +77,13 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
PairMap: nil,
Jwt: assignResult.Auth,
}
- uploadResult, err := operation.UploadData(data, uploadOption)
+
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ return nil, nil, fmt.Errorf("create uploader for %s: %v", targetUrl, err)
+ }
+
+ uploadResult, err := uploader.UploadData(data, uploadOption)
if err != nil {
return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
}
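This is the pattern every upload call site in this change follows: construct an operation.Uploader once, check the error, then call the method that used to be a package-level function. Reusing the names from the hunk above:

    uploader, err := operation.NewUploader()
    if err != nil {
        return nil, nil, err
    }
    uploadResult, err := uploader.UploadData(data, uploadOption)
    if err != nil {
        return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
    }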
diff --git a/weed/filer/filer_notify_read.go b/weed/filer/filer_notify_read.go
new file mode 100644
index 000000000..115a925e9
--- /dev/null
+++ b/weed/filer/filer_notify_read.go
@@ -0,0 +1,352 @@
+package filer
+
+import (
+ "container/heap"
+ "context"
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
+ "github.com/seaweedfs/seaweedfs/weed/wdclient"
+ "google.golang.org/protobuf/proto"
+ "io"
+ "math"
+ "strings"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util"
+)
+
+type LogFileEntry struct {
+ TsNs int64
+ FileEntry *Entry
+}
+
+func (f *Filer) collectPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64) (v *OrderedLogVisitor, err error) {
+
+ if stopTsNs != 0 && startPosition.Time.UnixNano() > stopTsNs {
+ return nil, io.EOF
+ }
+
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Year(), startPosition.Month(), startPosition.Day())
+
+ dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
+ if listDayErr != nil {
+ return nil, fmt.Errorf("fail to list log by day: %v", listDayErr)
+ }
+
+ return NewOrderedLogVisitor(f, startPosition, stopTsNs, dayEntries)
+
+}
+
+// ----------
+type LogEntryItem struct {
+ Entry *filer_pb.LogEntry
+ filer string
+}
+
+// LogEntryItemPriorityQueue a priority queue for LogEntry
+type LogEntryItemPriorityQueue []*LogEntryItem
+
+func (pq LogEntryItemPriorityQueue) Len() int { return len(pq) }
+func (pq LogEntryItemPriorityQueue) Less(i, j int) bool {
+ return pq[i].Entry.TsNs < pq[j].Entry.TsNs
+}
+func (pq LogEntryItemPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] }
+func (pq *LogEntryItemPriorityQueue) Push(x any) {
+ item := x.(*LogEntryItem)
+ *pq = append(*pq, item)
+}
+func (pq *LogEntryItemPriorityQueue) Pop() any {
+ n := len(*pq)
+ item := (*pq)[n-1]
+ *pq = (*pq)[:n-1]
+ return item
+}
+
+// ----------
+
+type OrderedLogVisitor struct {
+ perFilerIteratorMap map[string]*LogFileQueueIterator
+ pq *LogEntryItemPriorityQueue
+ logFileEntryCollector *LogFileEntryCollector
+}
+
+func NewOrderedLogVisitor(f *Filer, startPosition log_buffer.MessagePosition, stopTsNs int64, dayEntries []*Entry) (*OrderedLogVisitor, error) {
+
+ perFilerQueueMap := make(map[string]*LogFileQueueIterator)
+ // initialize the priority queue
+ pq := &LogEntryItemPriorityQueue{}
+ heap.Init(pq)
+
+ t := &OrderedLogVisitor{
+ perFilerIteratorMap: perFilerQueueMap,
+ pq: pq,
+ logFileEntryCollector: NewLogFileEntryCollector(f, startPosition, stopTsNs, dayEntries),
+ }
+ if err := t.logFileEntryCollector.collectMore(t); err != nil && err != io.EOF {
+ return nil, err
+ }
+ return t, nil
+}
+
+func (o *OrderedLogVisitor) GetNext() (logEntry *filer_pb.LogEntry, err error) {
+ if o.pq.Len() == 0 {
+ return nil, io.EOF
+ }
+ item := heap.Pop(o.pq).(*LogEntryItem)
+ filerId := item.filer
+
+ // fill the pq with the next log entry from the same filer
+ it := o.perFilerIteratorMap[filerId]
+ next, nextErr := it.getNext(o)
+ if nextErr != nil {
+ if nextErr == io.EOF {
+ // do nothing since the filer has no more log entries
+ } else {
+ return nil, fmt.Errorf("failed to get next log entry: %v", nextErr)
+ }
+ } else {
+ heap.Push(o.pq, &LogEntryItem{
+ Entry: next,
+ filer: filerId,
+ })
+ }
+ return item.Entry, nil
+}
+
+func getFilerId(name string) string {
+ idx := strings.LastIndex(name, ".")
+ if idx < 0 {
+ return ""
+ }
+ return name[idx+1:]
+}
+
+// ----------
+
+type LogFileEntryCollector struct {
+ f *Filer
+ startTsNs int64
+ stopTsNs int64
+ dayEntryQueue *util.Queue[*Entry]
+ startDate string
+ startHourMinute string
+ stopDate string
+ stopHourMinute string
+}
+
+func NewLogFileEntryCollector(f *Filer, startPosition log_buffer.MessagePosition, stopTsNs int64, dayEntries []*Entry) *LogFileEntryCollector {
+ dayEntryQueue := util.NewQueue[*Entry]()
+ for _, dayEntry := range dayEntries {
+ dayEntryQueue.Enqueue(dayEntry)
+ // println("enqueue day entry", dayEntry.Name())
+ }
+
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Year(), startPosition.Month(), startPosition.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d", startPosition.Hour(), startPosition.Minute())
+ var stopDate, stopHourMinute string
+ if stopTsNs != 0 {
+ stopTime := time.Unix(0, stopTsNs+int64(24*time.Hour)).UTC() // one day past stopTsNs so the stop day is fully scanned; entries past stopTsNs are filtered later
+ stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day())
+ stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute())
+ }
+
+ return &LogFileEntryCollector{
+ f: f,
+ startTsNs: startPosition.UnixNano(),
+ stopTsNs: stopTsNs,
+ dayEntryQueue: dayEntryQueue,
+ startDate: startDate,
+ startHourMinute: startHourMinute,
+ stopDate: stopDate,
+ stopHourMinute: stopHourMinute,
+ }
+}
+
+func (c *LogFileEntryCollector) hasMore() bool {
+ return c.dayEntryQueue.Len() > 0
+}
+
+func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) {
+ dayEntry := c.dayEntryQueue.Dequeue()
+ if dayEntry == nil {
+ return io.EOF
+ }
+ // println("dequeue day entry", dayEntry.Name())
+ if c.stopDate != "" {
+ if strings.Compare(dayEntry.Name(), c.stopDate) > 0 {
+ return io.EOF
+ }
+ }
+
+ hourMinuteEntries, _, listHourMinuteErr := c.f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
+ if listHourMinuteErr != nil {
+ return fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+ }
+ freshFilerIds := make(map[string]string)
+ for _, hourMinuteEntry := range hourMinuteEntries {
+ // println("checking hh-mm", hourMinuteEntry.FullPath)
+ hourMinute := util.FileNameBase(hourMinuteEntry.Name())
+ if dayEntry.Name() == c.startDate {
+ if strings.Compare(hourMinute, c.startHourMinute) < 0 {
+ continue
+ }
+ }
+ if dayEntry.Name() == c.stopDate {
+ if strings.Compare(hourMinute, c.stopHourMinute) > 0 {
+ break
+ }
+ }
+
+ tsMinute := fmt.Sprintf("%s-%s", dayEntry.Name(), hourMinute)
+ // println(" enqueue", tsMinute)
+ t, parseErr := time.Parse("2006-01-02-15-04", tsMinute)
+ if parseErr != nil {
+ glog.Errorf("failed to parse %s: %v", tsMinute, parseErr)
+ continue
+ }
+ filerId := getFilerId(hourMinuteEntry.Name())
+ iter, found := v.perFilerIteratorMap[filerId]
+ if !found {
+ iter = newLogFileQueueIterator(c.f.MasterClient, util.NewQueue[*LogFileEntry](), c.startTsNs, c.stopTsNs)
+ v.perFilerIteratorMap[filerId] = iter
+ freshFilerIds[filerId] = hourMinuteEntry.Name()
+ }
+ iter.q.Enqueue(&LogFileEntry{
+ TsNs: t.UnixNano(),
+ FileEntry: hourMinuteEntry,
+ })
+ }
+
+ // fill the pq with the next log entry if it is a new filer
+ for filerId, entryName := range freshFilerIds {
+ iter, found := v.perFilerIteratorMap[filerId]
+ if !found {
+ glog.Errorf("Unexpected! failed to find iterator for filer %s", filerId)
+ continue
+ }
+ next, nextErr := iter.getNext(v)
+ if nextErr != nil {
+ if nextErr == io.EOF {
+ // do nothing since the filer has no more log entries
+ } else {
+ return fmt.Errorf("failed to get next log entry for %v: %v", entryName, nextErr)
+ }
+ } else {
+ heap.Push(v.pq, &LogEntryItem{
+ Entry: next,
+ filer: filerId,
+ })
+ }
+ }
+
+ return nil
+}
+
+// ----------
+
+type LogFileQueueIterator struct {
+ q *util.Queue[*LogFileEntry]
+ masterClient *wdclient.MasterClient
+ startTsNs int64
+ stopTsNs int64
+ currentFileIterator *LogFileIterator
+}
+
+func newLogFileQueueIterator(masterClient *wdclient.MasterClient, q *util.Queue[*LogFileEntry], startTsNs, stopTsNs int64) *LogFileQueueIterator {
+ return &LogFileQueueIterator{
+ q: q,
+ masterClient: masterClient,
+ startTsNs: startTsNs,
+ stopTsNs: stopTsNs,
+ }
+}
+
+// getNext will return io.EOF when done
+func (iter *LogFileQueueIterator) getNext(v *OrderedLogVisitor) (logEntry *filer_pb.LogEntry, err error) {
+ for {
+ if iter.currentFileIterator != nil {
+ logEntry, err = iter.currentFileIterator.getNext()
+ if err != io.EOF {
+ return
+ }
+ }
+ // now either iter.currentFileIterator is nil or err is io.EOF
+ if iter.q.Len() == 0 {
+ return nil, io.EOF
+ }
+ t := iter.q.Dequeue()
+ if t == nil {
+ continue
+ }
+ // skip the file if it is after the stopTsNs
+ if iter.stopTsNs != 0 && t.TsNs > iter.stopTsNs {
+ return nil, io.EOF
+ }
+ next := iter.q.Peek()
+ if next == nil {
+ if collectErr := v.logFileEntryCollector.collectMore(v); collectErr != nil && collectErr != io.EOF {
+ return nil, collectErr
+ }
+ }
+ // skip this file if even the next file starts before startTsNs (everything in this file predates the start)
+ if next != nil && next.TsNs <= iter.startTsNs {
+ continue
+ }
+ iter.currentFileIterator = newLogFileIterator(iter.masterClient, t.FileEntry, iter.startTsNs, iter.stopTsNs)
+ }
+}
+
+// ----------
+
+type LogFileIterator struct {
+ r io.Reader
+ sizeBuf []byte
+ startTsNs int64
+ stopTsNs int64
+}
+
+func newLogFileIterator(masterClient *wdclient.MasterClient, fileEntry *Entry, startTsNs, stopTsNs int64) *LogFileIterator {
+ return &LogFileIterator{
+ r: NewChunkStreamReaderFromFiler(masterClient, fileEntry.Chunks),
+ sizeBuf: make([]byte, 4),
+ startTsNs: startTsNs,
+ stopTsNs: stopTsNs,
+ }
+}
+
+// getNext will return io.EOF when done
+func (iter *LogFileIterator) getNext() (logEntry *filer_pb.LogEntry, err error) {
+ var n int
+ for {
+ n, err = iter.r.Read(iter.sizeBuf)
+ if err != nil {
+ return
+ }
+ if n != 4 {
+ return nil, fmt.Errorf("size %d bytes, expected 4 bytes", n)
+ }
+ size := util.BytesToUint32(iter.sizeBuf)
+ // println("entry size", size)
+ entryData := make([]byte, size)
+ n, err = iter.r.Read(entryData)
+ if err != nil {
+ return
+ }
+ if n != int(size) {
+ return nil, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
+ }
+ logEntry = &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ return
+ }
+ if logEntry.TsNs <= iter.startTsNs {
+ continue
+ }
+ if iter.stopTsNs != 0 && logEntry.TsNs > iter.stopTsNs {
+ return nil, io.EOF
+ }
+ return
+ }
+}
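The new file is in essence a k-way merge: one LogFileQueueIterator per filer, with a min-heap keyed on TsNs picking the globally next entry so logs written concurrently by multiple filers replay in timestamp order. Stripped of the SeaweedFS types, the underlying pattern looks like this (a generic sketch, not code from this change):

    package main

    import "container/heap"

    type item struct {
        val int
        src int // index of the stream this value came from
    }

    type minHeap []item

    func (h minHeap) Len() int           { return len(h) }
    func (h minHeap) Less(i, j int) bool { return h[i].val < h[j].val }
    func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
    func (h *minHeap) Push(x any)        { *h = append(*h, x.(item)) }
    func (h *minHeap) Pop() any {
        old := *h
        n := len(old)
        it := old[n-1]
        *h = old[:n-1]
        return it
    }

    // mergeSorted merges k ascending streams into one ascending slice.
    func mergeSorted(streams [][]int) (out []int) {
        h := &minHeap{}
        pos := make([]int, len(streams))
        for i, s := range streams {
            if len(s) > 0 {
                heap.Push(h, item{s[0], i}) // seed the heap with each stream's head
                pos[i] = 1
            }
        }
        for h.Len() > 0 {
            it := heap.Pop(h).(item)
            out = append(out, it.val)
            // refill from the same stream, mirroring OrderedLogVisitor.GetNext
            if p := pos[it.src]; p < len(streams[it.src]) {
                heap.Push(h, item{streams[it.src][p], it.src})
                pos[it.src]++
            }
        }
        return
    }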
diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go
index d3beaba89..9c448edfd 100644
--- a/weed/filer/filerstore_wrapper.go
+++ b/weed/filer/filerstore_wrapper.go
@@ -85,7 +85,7 @@ func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string,
func (fsw *FilerStoreWrapper) getActualStore(path util.FullPath) (store FilerStore) {
store = fsw.defaultStore
- if path == "/" {
+ if path == "/" || path == "//" {
return
}
var storeId string
@@ -182,7 +182,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
}()
existingEntry, findErr := fsw.FindEntry(ctx, fp)
- if findErr == filer_pb.ErrNotFound {
+ if findErr == filer_pb.ErrNotFound || existingEntry == nil {
return nil
}
if len(existingEntry.HardLinkId) != 0 {
diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
index 78d15382f..b465046f9 100644
--- a/weed/filer/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -25,8 +25,9 @@ func init() {
}
type LevelDB2Store struct {
- dbs []*leveldb.DB
- dbCount int
+ dbs []*leveldb.DB
+ dbCount int
+ ReadOnly bool
}
func (store *LevelDB2Store) GetName() string {
@@ -49,6 +50,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
Filter: filter.NewBloomFilter(8), // false positive rate 0.02
+ ReadOnly: store.ReadOnly,
}
for d := 0; d < dbCount; d++ {
diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go
index 406dc80be..2522221da 100644
--- a/weed/filer/leveldb3/leveldb3_store.go
+++ b/weed/filer/leveldb3/leveldb3_store.go
@@ -31,9 +31,10 @@ func init() {
}
type LevelDB3Store struct {
- dir string
- dbs map[string]*leveldb.DB
- dbsLock sync.RWMutex
+ dir string
+ dbs map[string]*leveldb.DB
+ dbsLock sync.RWMutex
+ ReadOnly bool
}
func (store *LevelDB3Store) GetName() string {
@@ -69,12 +70,14 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
Filter: bloom,
+ ReadOnly: store.ReadOnly,
}
if name != DEFAULT {
opts = &opt.Options{
BlockCacheCapacity: 16 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 8 * 1024 * 1024, // default value is 4MiB
Filter: bloom,
+ ReadOnly: store.ReadOnly,
}
}
diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go
index 51c4e6987..0432e17de 100644
--- a/weed/filer/meta_replay.go
+++ b/weed/filer/meta_replay.go
@@ -2,6 +2,8 @@ package filer
import (
"context"
+ "sync"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/util"
@@ -35,3 +36,40 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
return nil
}
+
+
+// ParallelProcessDirectoryStructure processes each entry in parallel, while ensuring parent directories are processed first.
+// It assumes parent directories arrive on entryChan before their children.
+func ParallelProcessDirectoryStructure(entryChan chan *Entry, concurrency int, eachEntryFn func(entry *Entry) error) (firstErr error) {
+
+ executors := util.NewLimitedConcurrentExecutor(concurrency)
+
+ var wg sync.WaitGroup
+ for entry := range entryChan {
+ wg.Add(1)
+ if entry.IsDirectory() {
+ func() {
+ defer wg.Done()
+ if err := eachEntryFn(entry); err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ }()
+ } else {
+ executors.Execute(func() {
+ defer wg.Done()
+ if err := eachEntryFn(entry); err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ })
+ }
+ if firstErr != nil {
+ break
+ }
+ }
+ wg.Wait()
+ return
+}
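A usage sketch, assuming the producer closes entryChan when done and sends each directory before its children (store is an assumed FilerStore):

    entryChan := make(chan *filer.Entry)
    go func() {
        defer close(entryChan)
        // enqueue entries here, each directory before its children
    }()
    err := filer.ParallelProcessDirectoryStructure(entryChan, 16, func(entry *filer.Entry) error {
        // directories run inline (ordering preserved); files run on the worker pool
        return store.InsertEntry(context.Background(), entry)
    })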
diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go
index 7be54b193..fea2bbc89 100644
--- a/weed/filer/reader_cache.go
+++ b/weed/filer/reader_cache.go
@@ -2,7 +2,6 @@ package filer
import (
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/util"
"sync"
"sync/atomic"
"time"
@@ -10,6 +9,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/util/mem"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
type ReaderCache struct {
@@ -171,7 +171,7 @@ func (s *SingleChunkCacher) startCaching() {
s.data = mem.Allocate(s.chunkSize)
- _, s.err = util.RetriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
+ _, s.err = util_http.RetriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
if s.err != nil {
mem.Free(s.data)
s.data = nil
diff --git a/weed/filer/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go
index b24a9f5d3..be2710948 100644
--- a/weed/filer/redis/redis_cluster_store.go
+++ b/weed/filer/redis/redis_cluster_store.go
@@ -1,7 +1,7 @@
package redis
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis/redis_store.go b/weed/filer/redis/redis_store.go
index c3f7194e6..823bbf610 100644
--- a/weed/filer/redis/redis_store.go
+++ b/weed/filer/redis/redis_store.go
@@ -1,7 +1,7 @@
package redis
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
index 33c0ea342..8c8d6539c 100644
--- a/weed/filer/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -7,7 +7,7 @@ import (
"strings"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go
index 12ab70cec..230cda759 100644
--- a/weed/filer/redis/universal_redis_store_kv.go
+++ b/weed/filer/redis/universal_redis_store_kv.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
)
diff --git a/weed/filer/redis2/redis_cluster_store.go b/weed/filer/redis2/redis_cluster_store.go
index 835bb5154..6e4f11d22 100644
--- a/weed/filer/redis2/redis_cluster_store.go
+++ b/weed/filer/redis2/redis_cluster_store.go
@@ -1,7 +1,7 @@
package redis2
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis2/redis_sentinel_store.go b/weed/filer/redis2/redis_sentinel_store.go
index 313977dc7..5fc368fc7 100644
--- a/weed/filer/redis2/redis_sentinel_store.go
+++ b/weed/filer/redis2/redis_sentinel_store.go
@@ -1,7 +1,7 @@
package redis2
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
"time"
diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go
index 52b29121d..a271dbcd1 100644
--- a/weed/filer/redis2/redis_store.go
+++ b/weed/filer/redis2/redis_store.go
@@ -1,7 +1,7 @@
package redis2
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index 6b0e65c3d..d3f01f88a 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -57,7 +57,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
}
if name != "" {
- if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil {
+ if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
}
}
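Most of the redis churn in this change is the mechanical import move from github.com/go-redis/redis/v8 to github.com/redis/go-redis/v9, but the hunk above also shows the one substantive API difference: v9's ZAddNX takes redis.Z values rather than *redis.Z pointers.

    // go-redis v8
    client.ZAddNX(ctx, key, &redis.Z{Score: 0, Member: name})
    // go-redis v9
    client.ZAddNX(ctx, key, redis.Z{Score: 0, Member: name})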
diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go
index a68a3449c..5b515f605 100644
--- a/weed/filer/redis2/universal_redis_store_kv.go
+++ b/weed/filer/redis2/universal_redis_store_kv.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
)
diff --git a/weed/filer/redis3/ItemList.go b/weed/filer/redis3/ItemList.go
index c576cbeb4..9e38089a7 100644
--- a/weed/filer/redis3/ItemList.go
+++ b/weed/filer/redis3/ItemList.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
)
@@ -399,9 +399,9 @@ func (nl *ItemList) NodeSize(node *skiplist.SkipListElementReference) int {
func (nl *ItemList) NodeAddMember(node *skiplist.SkipListElementReference, names ...string) error {
key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
- var members []*redis.Z
+ var members []redis.Z
for _, name := range names {
- members = append(members, &redis.Z{
+ members = append(members, redis.Z{
Score: 0,
Member: name,
})
diff --git a/weed/filer/redis3/item_list_serde.go b/weed/filer/redis3/item_list_serde.go
index e71243838..f4410b61b 100644
--- a/weed/filer/redis3/item_list_serde.go
+++ b/weed/filer/redis3/item_list_serde.go
@@ -1,7 +1,7 @@
package redis3
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
"google.golang.org/protobuf/proto"
diff --git a/weed/filer/redis3/kv_directory_children.go b/weed/filer/redis3/kv_directory_children.go
index 422500ed7..5a2d76141 100644
--- a/weed/filer/redis3/kv_directory_children.go
+++ b/weed/filer/redis3/kv_directory_children.go
@@ -3,7 +3,7 @@ package redis3
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/glog"
)
diff --git a/weed/filer/redis3/kv_directory_children_test.go b/weed/filer/redis3/kv_directory_children_test.go
index 9d7acacf1..76a8dc00f 100644
--- a/weed/filer/redis3/kv_directory_children_test.go
+++ b/weed/filer/redis3/kv_directory_children_test.go
@@ -3,7 +3,7 @@ package redis3
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/stvp/tempredis"
"strconv"
"testing"
@@ -116,7 +116,7 @@ func BenchmarkRedis(b *testing.B) {
})
for i := 0; i < b.N; i++ {
- client.ZAddNX(context.Background(), "/yyy/bin", &redis.Z{Score: 0, Member: strconv.Itoa(i) + "namexxxxxxxxxxxxxxxxxxx"})
+ client.ZAddNX(context.Background(), "/yyy/bin", redis.Z{Score: 0, Member: strconv.Itoa(i) + "namexxxxxxxxxxxxxxxxxxx"})
}
}
@@ -149,7 +149,7 @@ func xTestNameListAdd(t *testing.T) {
ts1 := time.Now()
for i := 0; i < N; i++ {
- client.ZAddNX(context.Background(), "/x", &redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)})
+ client.ZAddNX(context.Background(), "/x", redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)})
}
ts2 := time.Now()
@@ -205,6 +205,6 @@ func xBenchmarkRedis(b *testing.B) {
})
for i := 0; i < b.N; i++ {
- client.ZAddNX(context.Background(), "/xxx/bin", &redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)})
+ client.ZAddNX(context.Background(), "/xxx/bin", redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)})
}
}
diff --git a/weed/filer/redis3/redis_cluster_store.go b/weed/filer/redis3/redis_cluster_store.go
index 1efa8e9b0..a8858bfa9 100644
--- a/weed/filer/redis3/redis_cluster_store.go
+++ b/weed/filer/redis3/redis_cluster_store.go
@@ -1,9 +1,9 @@
package redis3
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/go-redsync/redsync/v4"
- "github.com/go-redsync/redsync/v4/redis/goredis/v8"
+ "github.com/go-redsync/redsync/v4/redis/goredis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis3/redis_sentinel_store.go b/weed/filer/redis3/redis_sentinel_store.go
index 4135d9956..2e0a8d4ae 100644
--- a/weed/filer/redis3/redis_sentinel_store.go
+++ b/weed/filer/redis3/redis_sentinel_store.go
@@ -3,9 +3,9 @@ package redis3
import (
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/go-redsync/redsync/v4"
- "github.com/go-redsync/redsync/v4/redis/goredis/v8"
+ "github.com/go-redsync/redsync/v4/redis/goredis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis3/redis_store.go b/weed/filer/redis3/redis_store.go
index 9b98459e6..339115563 100644
--- a/weed/filer/redis3/redis_store.go
+++ b/weed/filer/redis3/redis_store.go
@@ -1,9 +1,9 @@
package redis3
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/go-redsync/redsync/v4"
- "github.com/go-redsync/redsync/v4/redis/goredis/v8"
+ "github.com/go-redsync/redsync/v4/redis/goredis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis3/skiplist_element_store.go b/weed/filer/redis3/skiplist_element_store.go
index b589e3e77..46506187e 100644
--- a/weed/filer/redis3/skiplist_element_store.go
+++ b/weed/filer/redis3/skiplist_element_store.go
@@ -3,7 +3,7 @@ package redis3
import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
"google.golang.org/protobuf/proto"
diff --git a/weed/filer/redis3/universal_redis_store.go b/weed/filer/redis3/universal_redis_store.go
index 2fb9a5b3f..51675d971 100644
--- a/weed/filer/redis3/universal_redis_store.go
+++ b/weed/filer/redis3/universal_redis_store.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
redsync "github.com/go-redsync/redsync/v4"
"github.com/seaweedfs/seaweedfs/weed/filer"
diff --git a/weed/filer/redis3/universal_redis_store_kv.go b/weed/filer/redis3/universal_redis_store_kv.go
index 280eb4a96..fd665d5d4 100644
--- a/weed/filer/redis3/universal_redis_store_kv.go
+++ b/weed/filer/redis3/universal_redis_store_kv.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
)
diff --git a/weed/filer/redis_lua/redis_cluster_store.go b/weed/filer/redis_lua/redis_cluster_store.go
index ae274bb09..251aadbcd 100644
--- a/weed/filer/redis_lua/redis_cluster_store.go
+++ b/weed/filer/redis_lua/redis_cluster_store.go
@@ -1,7 +1,7 @@
package redis_lua
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis_lua/redis_sentinel_store.go b/weed/filer/redis_lua/redis_sentinel_store.go
index ac8ae75d7..f22a7fa66 100644
--- a/weed/filer/redis_lua/redis_sentinel_store.go
+++ b/weed/filer/redis_lua/redis_sentinel_store.go
@@ -1,7 +1,7 @@
package redis_lua
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
"time"
diff --git a/weed/filer/redis_lua/redis_store.go b/weed/filer/redis_lua/redis_store.go
index e88f3a3dc..8574baa09 100644
--- a/weed/filer/redis_lua/redis_store.go
+++ b/weed/filer/redis_lua/redis_store.go
@@ -1,7 +1,7 @@
package redis_lua
import (
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/util"
)
diff --git a/weed/filer/redis_lua/stored_procedure/init.go b/weed/filer/redis_lua/stored_procedure/init.go
index 1412ceba2..9373cc5a3 100644
--- a/weed/filer/redis_lua/stored_procedure/init.go
+++ b/weed/filer/redis_lua/stored_procedure/init.go
@@ -2,7 +2,7 @@ package stored_procedure
import (
_ "embed"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
func init() {
diff --git a/weed/filer/redis_lua/universal_redis_store.go b/weed/filer/redis_lua/universal_redis_store.go
index 59c128030..9e8dbcda7 100644
--- a/weed/filer/redis_lua/universal_redis_store.go
+++ b/weed/filer/redis_lua/universal_redis_store.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/redis_lua/stored_procedure"
diff --git a/weed/filer/redis_lua/universal_redis_store_kv.go b/weed/filer/redis_lua/universal_redis_store_kv.go
index a03ae6573..01a7ba560 100644
--- a/weed/filer/redis_lua/universal_redis_store_kv.go
+++ b/weed/filer/redis_lua/universal_redis_store_kv.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
)
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index 23a853b9a..fdb443b53 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -16,6 +16,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var getLookupFileIdBackoffSchedule = []time.Duration{
@@ -194,7 +195,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer
return err
}
- n, err := util.RetriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk)
+ n, err := util_http.RetriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk)
if err != nil {
return err
}
@@ -350,7 +351,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
var buffer bytes.Buffer
var shouldRetry bool
for _, urlString := range urlStrings {
- shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize), func(data []byte) {
+ shouldRetry, err = util_http.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize), func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {
diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go
index 3a4fb914e..d1575a14e 100644
--- a/weed/iamapi/iamapi_server.go
+++ b/weed/iamapi/iamapi_server.go
@@ -72,7 +72,7 @@ func (iama *IamApiServer) registerRouter(router *mux.Router) {
// ListBuckets
// apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
- apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
+ apiRouter.Methods(http.MethodPost).Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
//
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler)
diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go
index efd74a059..f32e1ac51 100644
--- a/weed/iamapi/iamapi_test.go
+++ b/weed/iamapi/iamapi_test.go
@@ -189,7 +189,7 @@ func TestDeleteUser(t *testing.T) {
func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) {
rr := httptest.NewRecorder()
apiRouter := mux.NewRouter().SkipClean(true)
- apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions)
+ apiRouter.Path("/").Methods(http.MethodPost).HandlerFunc(ias.DoActions)
apiRouter.ServeHTTP(rr, req)
return rr, xml.Unmarshal(rr.Body.Bytes(), &v)
}
diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go
index 2e08432c0..f47d4a877 100644
--- a/weed/mount/filehandle.go
+++ b/weed/mount/filehandle.go
@@ -66,8 +66,8 @@ func (fh *FileHandle) FullPath() util.FullPath {
return fp
}
-func (fh *FileHandle) GetEntry() *filer_pb.Entry {
- return fh.entry.GetEntry()
+func (fh *FileHandle) GetEntry() *LockedEntry {
+ return fh.entry
}
func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
@@ -90,13 +90,6 @@ func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entr
}
func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
- fh.entryLock.Lock()
- defer fh.entryLock.Unlock()
-
- if fh.entry == nil {
- return
- }
-
fh.entry.AppendChunks(chunks)
}
@@ -105,9 +98,6 @@ func (fh *FileHandle) ReleaseHandle() {
fhActiveLock := fh.wfs.fhLockTable.AcquireLock("ReleaseHandle", fh.fh, util.ExclusiveLock)
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
- fh.entryLock.Lock()
- defer fh.entryLock.Unlock()
-
fh.dirtyPages.Destroy()
if IsDebugFileReadWrite {
fh.mirrorFile.Close()
diff --git a/weed/mount/filehandle_map.go b/weed/mount/filehandle_map.go
index f0051f061..852ef9e35 100644
--- a/weed/mount/filehandle_map.go
+++ b/weed/mount/filehandle_map.go
@@ -1,6 +1,7 @@
package mount
import (
+ "github.com/seaweedfs/seaweedfs/weed/util"
"sync"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -8,7 +9,6 @@ import (
type FileHandleToInode struct {
sync.RWMutex
- nextFh FileHandleId
inode2fh map[uint64]*FileHandle
fh2inode map[FileHandleId]uint64
}
@@ -17,7 +17,6 @@ func NewFileHandleToInode() *FileHandleToInode {
return &FileHandleToInode{
inode2fh: make(map[uint64]*FileHandle),
fh2inode: make(map[FileHandleId]uint64),
- nextFh: 0,
}
}
@@ -43,14 +42,13 @@ func (i *FileHandleToInode) AcquireFileHandle(wfs *WFS, inode uint64, entry *fil
defer i.Unlock()
fh, found := i.inode2fh[inode]
if !found {
- fh = newFileHandle(wfs, i.nextFh, inode, entry)
- i.nextFh++
+ fh = newFileHandle(wfs, FileHandleId(util.RandomUint64()), inode, entry)
i.inode2fh[inode] = fh
i.fh2inode[fh.fh] = inode
} else {
fh.counter++
}
- if fh.GetEntry() != entry {
+ if fh.GetEntry().GetEntry() != entry {
fh.SetEntry(entry)
}
return fh
diff --git a/weed/mount/filehandle_read.go b/weed/mount/filehandle_read.go
index 7b2629c13..3c315b1c4 100644
--- a/weed/mount/filehandle_read.go
+++ b/weed/mount/filehandle_read.go
@@ -29,23 +29,19 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, e
fileFullPath := fh.FullPath()
entry := fh.GetEntry()
- if entry == nil {
- return 0, 0, io.EOF
- }
if entry.IsInRemoteOnly() {
glog.V(4).Infof("download remote entry %s", fileFullPath)
- newEntry, err := fh.downloadRemoteEntry(entry)
+ err := fh.downloadRemoteEntry(entry)
if err != nil {
glog.V(1).Infof("download remote entry %s: %v", fileFullPath, err)
return 0, 0, err
}
- entry = newEntry
}
fileSize := int64(entry.Attributes.FileSize)
if fileSize == 0 {
- fileSize = int64(filer.FileSize(entry))
+ fileSize = int64(filer.FileSize(entry.GetEntry()))
}
if fileSize == 0 {
@@ -70,7 +66,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, e
return int64(totalRead), ts, err
}
-func (fh *FileHandle) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {
+func (fh *FileHandle) downloadRemoteEntry(entry *LockedEntry) error {
fileFullPath := fh.FullPath()
dir, _ := fileFullPath.DirAndName()
@@ -88,12 +84,12 @@ func (fh *FileHandle) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entr
return fmt.Errorf("CacheRemoteObjectToLocalCluster file %s: %v", fileFullPath, err)
}
- entry = resp.Entry
+ entry.SetEntry(resp.Entry)
fh.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, resp.Entry))
return nil
})
- return entry, err
+ return err
}
diff --git a/weed/mount/locked_entry.go b/weed/mount/locked_entry.go
index f3b4bf484..c5fbaee91 100644
--- a/weed/mount/locked_entry.go
+++ b/weed/mount/locked_entry.go
@@ -16,6 +16,8 @@ func (le *LockedEntry) GetEntry() *filer_pb.Entry {
return le.Entry
}
+// SetEntry sets the entry of the LockedEntry
+// entry should never be nil
func (le *LockedEntry) SetEntry(entry *filer_pb.Entry) {
le.Lock()
defer le.Unlock()
diff --git a/weed/mount/meta_cache/meta_cache.go b/weed/mount/meta_cache/meta_cache.go
index 46a76365c..8a571d9e6 100644
--- a/weed/mount/meta_cache/meta_cache.go
+++ b/weed/mount/meta_cache/meta_cache.go
@@ -4,6 +4,7 @@ import (
"context"
"os"
"sync"
+ "time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/leveldb"
@@ -118,6 +119,9 @@ func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *fi
if err != nil {
return nil, err
}
+ if entry.TtlSec > 0 && entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) {
+ return nil, filer_pb.ErrNotFound
+ }
mc.mapIdFromFilerToLocal(entry)
return
}
@@ -143,6 +147,9 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
}
_, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool {
+ if entry.TtlSec > 0 && entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) {
+ return true
+ }
mc.mapIdFromFilerToLocal(entry)
return eachEntryFunc(entry)
})
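Both cache paths now treat TTL-expired entries as absent rather than returning stale data. Since TtlSec is in seconds, the expiry predicate reduces to:

    expired := entry.TtlSec > 0 &&
        entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now())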
diff --git a/weed/mount/meta_cache/meta_cache_subscribe.go b/weed/mount/meta_cache/meta_cache_subscribe.go
index 72da0ca64..d3bb27d08 100644
--- a/weed/mount/meta_cache/meta_cache_subscribe.go
+++ b/weed/mount/meta_cache/meta_cache_subscribe.go
@@ -7,6 +7,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
+ "strings"
)
func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
@@ -57,12 +58,17 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
}
+ prefix := dir
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: "mount",
ClientId: selfSignature,
ClientEpoch: 1,
SelfSignature: selfSignature,
- PathPrefix: dir,
+ PathPrefix: prefix,
AdditionalPathPrefixes: nil,
DirectoriesToWatch: nil,
StartTsNs: lastTsNs,
diff --git a/weed/mount/weedfs.go b/weed/mount/weedfs.go
index c5a1d2755..a9fbd9380 100644
--- a/weed/mount/weedfs.go
+++ b/weed/mount/weedfs.go
@@ -112,9 +112,6 @@ func NewSeaweedFileSystem(option *Option) *WFS {
fhActiveLock := fh.wfs.fhLockTable.AcquireLock("invalidateFunc", fh.fh, util.ExclusiveLock)
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
- fh.entryLock.Lock()
- defer fh.entryLock.Unlock()
-
// Recreate dirty pages
fh.dirtyPages.Destroy()
fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)
@@ -122,7 +119,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
// Update handle entry
newentry, status := wfs.maybeLoadEntry(filePath)
if status == fuse.OK {
- if fh.GetEntry() != newentry {
+ if fh.GetEntry().GetEntry() != newentry {
fh.SetEntry(newentry)
}
}
diff --git a/weed/mount/weedfs_dir_lookup.go b/weed/mount/weedfs_dir_lookup.go
index e646b06a9..f3ba0cc85 100644
--- a/weed/mount/weedfs_dir_lookup.go
+++ b/weed/mount/weedfs_dir_lookup.go
@@ -59,7 +59,7 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin
if fh, found := wfs.fhmap.FindFileHandle(inode); found {
fh.entryLock.RLock()
- if entry := fh.GetEntry(); entry != nil {
+ if entry := fh.GetEntry().GetEntry(); entry != nil {
glog.V(4).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(entry))
localEntry = filer.FromPbEntry(string(dirPath), entry)
}
diff --git a/weed/mount/weedfs_dir_read.go b/weed/mount/weedfs_dir_read.go
index f140fd86f..c80ecce9e 100644
--- a/weed/mount/weedfs_dir_read.go
+++ b/weed/mount/weedfs_dir_read.go
@@ -6,6 +6,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
+ "github.com/seaweedfs/seaweedfs/weed/util"
"math"
"sync"
)
@@ -43,10 +44,7 @@ func NewDirectoryHandleToInode() *DirectoryHandleToInode {
}
func (wfs *WFS) AcquireDirectoryHandle() (DirectoryHandleId, *DirectoryHandle) {
- wfs.fhmap.Lock()
- fh := wfs.fhmap.nextFh
- wfs.fhmap.nextFh++
- wfs.fhmap.Unlock()
+ fh := FileHandleId(util.RandomUint64())
wfs.dhmap.Lock()
defer wfs.dhmap.Unlock()
@@ -173,7 +171,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPl
}
if fh, found := wfs.fhmap.FindFileHandle(inode); found {
glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name))
- entry = filer.FromPbEntry(string(dirPath), fh.GetEntry())
+ entry = filer.FromPbEntry(string(dirPath), fh.GetEntry().GetEntry())
}
wfs.outputFilerEntry(entryOut, inode, entry)
}
diff --git a/weed/mount/weedfs_file_lseek.go b/weed/mount/weedfs_file_lseek.go
index 35157d993..0cf7ef43b 100644
--- a/weed/mount/weedfs_file_lseek.go
+++ b/weed/mount/weedfs_file_lseek.go
@@ -38,10 +38,8 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO
// lock the file until the proper offset was calculated
fhActiveLock := fh.wfs.fhLockTable.AcquireLock("Lseek", fh.fh, util.SharedLock)
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
- fh.entryLock.RLock()
- defer fh.entryLock.RUnlock()
- fileSize := int64(filer.FileSize(fh.GetEntry()))
+ fileSize := int64(filer.FileSize(fh.GetEntry().GetEntry()))
offset := max(int64(in.Offset), 0)
glog.V(4).Infof(
diff --git a/weed/mount/weedfs_file_sync.go b/weed/mount/weedfs_file_sync.go
index 762a9b8de..d857606bd 100644
--- a/weed/mount/weedfs_file_sync.go
+++ b/weed/mount/weedfs_file_sync.go
@@ -116,13 +116,8 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)
err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
- fh.entryLock.Lock()
- defer fh.entryLock.Unlock()
entry := fh.GetEntry()
- if entry == nil {
- return nil
- }
entry.Name = name // this flush may be just after a rename operation
if entry.Attributes != nil {
@@ -141,7 +136,7 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
request := &filer_pb.CreateEntryRequest{
Directory: string(dir),
- Entry: entry,
+ Entry: entry.GetEntry(),
Signatures: []int32{wfs.signature},
SkipCheckParentDirectory: true,
}
diff --git a/weed/mount/weedfs_write.go b/weed/mount/weedfs_write.go
index 4c8470245..77ad01b89 100644
--- a/weed/mount/weedfs_write.go
+++ b/weed/mount/weedfs_write.go
@@ -14,8 +14,12 @@ import (
func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType {
return func(reader io.Reader, filename string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ return
+ }
- fileId, uploadResult, err, data := operation.UploadWithRetry(
+ fileId, uploadResult, err, data := uploader.UploadWithRetry(
wfs,
&filer_pb.AssignVolumeRequest{
Count: 1,
diff --git a/weed/mq/broker/broker_topic_partition_read_write.go b/weed/mq/broker/broker_topic_partition_read_write.go
index 7dd78c582..4c1b9a1e2 100644
--- a/weed/mq/broker/broker_topic_partition_read_write.go
+++ b/weed/mq/broker/broker_topic_partition_read_write.go
@@ -13,6 +13,7 @@ import (
"math"
"sync/atomic"
"time"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Partition) log_buffer.LogFlushFuncType {
@@ -130,7 +131,7 @@ func (b *MessageQueueBroker) genLogOnDiskReadFunc(t topic.Topic, partition *mq_p
for _, urlString := range urlStrings {
// TODO optimization opportunity: reuse the buffer
var data []byte
- if data, _, err = util.Get(urlString); err == nil {
+ if data, _, err = util_http.Get(urlString); err == nil {
processed = true
if processedTsNs, err = eachChunkFn(data, eachLogEntryFn, starTsNs, stopTsNs); err != nil {
return
diff --git a/weed/mq/broker/broker_write.go b/weed/mq/broker/broker_write.go
index 896f0ee75..9999529fb 100644
--- a/weed/mq/broker/broker_write.go
+++ b/weed/mq/broker/broker_write.go
@@ -55,7 +55,13 @@ func (b *MessageQueueBroker) appendToFile(targetFile string, data []byte) error
func (b *MessageQueueBroker) assignAndUpload(targetFile string, data []byte) (fileId string, uploadResult *operation.UploadResult, err error) {
reader := util.NewBytesReader(data)
- fileId, uploadResult, err, _ = operation.UploadWithRetry(
+
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ return
+ }
+
+ fileId, uploadResult, err, _ = uploader.UploadWithRetry(
b,
&filer_pb.AssignVolumeRequest{
Count: 1,
diff --git a/weed/mq/client/cmd/weed_pub_kv/publisher_kv.go b/weed/mq/client/cmd/weed_pub_kv/publisher_kv.go
index 096b355a1..3ab3cb251 100644
--- a/weed/mq/client/cmd/weed_pub_kv/publisher_kv.go
+++ b/weed/mq/client/cmd/weed_pub_kv/publisher_kv.go
@@ -9,6 +9,7 @@ import (
"strings"
"sync"
"time"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -45,6 +46,7 @@ func doPublish(publisher *pub_client.TopicPublisher, id int) {
func main() {
flag.Parse()
+ util_http.InitGlobalHttpClient()
config := &pub_client.PublisherConfiguration{
Topic: topic.NewTopic(*namespace, *t),
diff --git a/weed/mq/client/cmd/weed_pub_record/publisher_record.go b/weed/mq/client/cmd/weed_pub_record/publisher_record.go
index a5fbd455e..f340dd1c8 100644
--- a/weed/mq/client/cmd/weed_pub_record/publisher_record.go
+++ b/weed/mq/client/cmd/weed_pub_record/publisher_record.go
@@ -11,6 +11,7 @@ import (
"strings"
"sync"
"time"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -86,6 +87,7 @@ func (r *MyRecord) ToRecordValue() *schema_pb.RecordValue {
func main() {
flag.Parse()
+ util_http.InitGlobalHttpClient()
recordType := schema.RecordTypeBegin().
WithField("key", schema.TypeBytes).
diff --git a/weed/mq/client/cmd/weed_sub_kv/subscriber_kv.go b/weed/mq/client/cmd/weed_sub_kv/subscriber_kv.go
index 902e7ed1b..8ff667763 100644
--- a/weed/mq/client/cmd/weed_sub_kv/subscriber_kv.go
+++ b/weed/mq/client/cmd/weed_sub_kv/subscriber_kv.go
@@ -11,6 +11,7 @@ import (
"google.golang.org/grpc/credentials/insecure"
"strings"
"time"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -25,6 +26,7 @@ var (
func main() {
flag.Parse()
+ util_http.InitGlobalHttpClient()
subscriberConfig := &sub_client.SubscriberConfiguration{
ConsumerGroup: "test",
diff --git a/weed/mq/client/cmd/weed_sub_record/subscriber_record.go b/weed/mq/client/cmd/weed_sub_record/subscriber_record.go
index 674c881ba..00fe83feb 100644
--- a/weed/mq/client/cmd/weed_sub_record/subscriber_record.go
+++ b/weed/mq/client/cmd/weed_sub_record/subscriber_record.go
@@ -13,6 +13,7 @@ import (
"google.golang.org/protobuf/proto"
"strings"
"time"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -51,6 +52,7 @@ func FromSchemaRecordValue(recordValue *schema_pb.RecordValue) *MyRecord {
func main() {
flag.Parse()
+ util_http.InitGlobalHttpClient()
subscriberConfig := &sub_client.SubscriberConfiguration{
ConsumerGroup: "test",
diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go
index c451420fe..be3e5c98e 100644
--- a/weed/operation/chunked_file.go
+++ b/weed/operation/chunked_file.go
@@ -15,6 +15,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -79,7 +80,7 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g
for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid)
}
- results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds)
+ results, err := DeleteFileIds(masterFn, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
@@ -95,7 +96,7 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g
}
func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (written int64, e error) {
- req, err := http.NewRequest("GET", fileUrl, nil)
+ req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
if err != nil {
return written, err
}
@@ -103,11 +104,11 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (wri
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
}
- resp, err := util.Do(req)
+ resp, err := util_http.Do(req)
if err != nil {
return written, err
}
- defer util.CloseResponse(resp)
+ defer util_http.CloseResponse(resp)
switch resp.StatusCode {
case http.StatusRequestedRangeNotSatisfiable:
diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go
index cee80fb47..419223165 100644
--- a/weed/operation/delete_content.go
+++ b/weed/operation/delete_content.go
@@ -28,8 +28,8 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
return fid[:commaIndex], fid[commaIndex+1:], nil
}
-// DeleteFiles batch deletes a list of fileIds
-func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
+// DeleteFileIds batch deletes a list of fileIds
+func DeleteFileIds(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
lookupFunc := func(vids []string) (results map[string]*LookupResult, err error) {
results, err = LookupVolumeIds(masterFn, grpcDialOption, vids)
@@ -43,11 +43,11 @@ func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.Di
return
}
- return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
+ return DeleteFileIdsWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
}
-func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]*LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
+func DeleteFileIdsWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]*LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
var ret []*volume_server_pb.DeleteResult
@@ -102,7 +102,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
go func(server pb.ServerAddress, fidList []string) {
defer wg.Done()
- if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, false); deleteErr != nil {
+ if deleteResults, deleteErr := DeleteFileIdsAtOneVolumeServer(server, grpcDialOption, fidList, false); deleteErr != nil {
err = deleteErr
} else if deleteResults != nil {
resultChan <- deleteResults
@@ -120,8 +120,8 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
return ret, err
}
-// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
-func DeleteFilesAtOneVolumeServer(volumeServer pb.ServerAddress, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) {
+// DeleteFileIdsAtOneVolumeServer deletes a list of file ids that are on one volume server, via gRPC
+func DeleteFileIdsAtOneVolumeServer(volumeServer pb.ServerAddress, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) {
err = WithVolumeServerClient(false, volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
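The renames make the unit of deletion explicit: these helpers take file ids, not entries. A sketch of the call shape (`deleteChunks` is hypothetical; `masterFn` and `dialOpt` are assumed to come from caller configuration):

```go
package example

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"google.golang.org/grpc"
)

// deleteChunks batch-deletes file ids and logs per-id failures.
func deleteChunks(masterFn operation.GetMasterFn, dialOpt grpc.DialOption, fileIds []string) error {
	results, err := operation.DeleteFileIds(masterFn, false, dialOpt, fileIds)
	if err != nil {
		return fmt.Errorf("batch delete: %v", err)
	}
	for _, r := range results {
		if r.Error != "" {
			glog.V(0).Infof("fid %s: %s", r.FileId, r.Error)
		}
	}
	return nil
}
```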
diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go
index 07b0153a9..b4bac5976 100644
--- a/weed/operation/needle_parse_test.go
+++ b/weed/operation/needle_parse_test.go
@@ -38,15 +38,11 @@ If the content is already compressed, need to know the content size.
*/
func TestCreateNeedleFromRequest(t *testing.T) {
- mc := &MockClient{}
- tmp := HttpClient
- HttpClient = mc
- defer func() {
- HttpClient = tmp
- }()
+ mockClient := &MockClient{}
+ uploader := newUploader(mockClient)
{
- mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ mockClient.needleHandling = func(n *needle.Needle, originalSize int, err error) {
assert.Equal(t, nil, err, "upload: %v", err)
assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime))
assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
@@ -62,7 +58,7 @@ func TestCreateNeedleFromRequest(t *testing.T) {
PairMap: nil,
Jwt: "",
}
- uploadResult, err, data := Upload(bytes.NewReader([]byte(textContent)), uploadOption)
+ uploadResult, err, data := uploader.Upload(bytes.NewReader([]byte(textContent)), uploadOption)
if len(data) != len(textContent) {
t.Errorf("data actual %d expected %d", len(data), len(textContent))
}
@@ -73,7 +69,7 @@ func TestCreateNeedleFromRequest(t *testing.T) {
}
{
- mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+ mockClient.needleHandling = func(n *needle.Needle, originalSize int, err error) {
assert.Equal(t, nil, err, "upload: %v", err)
assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime))
assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
@@ -90,7 +86,7 @@ func TestCreateNeedleFromRequest(t *testing.T) {
PairMap: nil,
Jwt: "",
}
- Upload(bytes.NewReader(gzippedData), uploadOption)
+ uploader.Upload(bytes.NewReader(gzippedData), uploadOption)
}
/*
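Because the uploader now takes its `HTTPClient` by injection, the test no longer swaps a package-level variable. A sketch of a stub client in the same style (the `stubClient` type is hypothetical and would live in the `operation` package so it can reach `newUploader`):

```go
package operation

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// stubClient is a hypothetical test double satisfying the HTTPClient interface.
type stubClient struct {
	do func(req *http.Request) (*http.Response, error)
}

func (s *stubClient) Do(req *http.Request) (*http.Response, error) {
	return s.do(req)
}

func TestUploaderWithStub(t *testing.T) {
	up := newUploader(&stubClient{
		do: func(req *http.Request) (*http.Response, error) {
			rec := httptest.NewRecorder()
			rec.WriteString(`{"name":"x","size":1}`)
			return rec.Result(), nil
		},
	})
	_ = up // drive up.Upload(...) with an UploadOption, as in the test above
}
```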
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 57bd81b14..516478dbe 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -217,7 +217,13 @@ func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jw
PairMap: nil,
Jwt: jwt,
}
- ret, e, _ := Upload(fi.Reader, uploadOption)
+
+ uploader, e := NewUploader()
+ if e != nil {
+ return 0, e
+ }
+
+ ret, e, _ := uploader.Upload(fi.Reader, uploadOption)
if e != nil {
return 0, e
}
@@ -239,7 +245,13 @@ func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn,
PairMap: nil,
Jwt: jwt,
}
- uploadResult, uploadError, _ := Upload(reader, uploadOption)
+
+ uploader, uploaderError := NewUploader()
+ if uploaderError != nil {
+ return 0, uploaderError
+ }
+
+ uploadResult, uploadError, _ := uploader.Upload(reader, uploadOption)
if uploadError != nil {
return 0, uploadError
}
@@ -265,6 +277,12 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
PairMap: nil,
Jwt: jwt,
}
- _, e = UploadData(buf, uploadOption)
+
+ uploader, e := NewUploader()
+ if e != nil {
+ return e
+ }
+
+ _, e = uploader.UploadData(buf, uploadOption)
return e
}
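Every former call to the package-level upload functions now goes through the constructor first, so callers pick up one extra error to handle. The migrated call shape, sketched (`uploadOnce` is hypothetical; the reader and option are assumed to be prepared by the caller):

```go
package example

import (
	"io"

	"github.com/seaweedfs/seaweedfs/weed/operation"
)

// uploadOnce shows the constructor-then-upload shape used in submit.go.
func uploadOnce(reader io.Reader, option *operation.UploadOption) (*operation.UploadResult, error) {
	uploader, err := operation.NewUploader()
	if err != nil {
		return nil, err // constructing the shared HTTP client failed
	}
	result, err, _ := uploader.Upload(reader, option)
	return result, err
}
```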
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index a1df07d7e..8b223e769 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -9,7 +9,7 @@ import (
"io"
"mime"
"mime/multipart"
- "net"
+ "sync"
"net/http"
"net/textproto"
"path/filepath"
@@ -21,6 +21,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
+ util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)
type UploadOption struct {
@@ -62,29 +64,47 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsN
}
}
+var (
+ fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "")
+ uploader *Uploader
+ uploaderErr error
+ once sync.Once
+)
+
// HTTPClient is the minimal HTTP client interface, satisfied by *http.Client and by test doubles.
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
-var (
- HttpClient HTTPClient
-)
+// Uploader wraps an HTTP client used to upload content to volume servers.
+type Uploader struct {
+ httpClient HTTPClient
+}
-func init() {
- HttpClient = &http.Client{Transport: &http.Transport{
- DialContext: (&net.Dialer{
- Timeout: 10 * time.Second,
- KeepAlive: 10 * time.Second,
- }).DialContext,
- MaxIdleConns: 1024,
- MaxIdleConnsPerHost: 1024,
- }}
+func NewUploader() (*Uploader, error) {
+	once.Do(func() {
+		// Build the shared HTTP client exactly once, attaching a dial context.
+ var httpClient *util_http_client.HTTPClient
+ httpClient, uploaderErr = util_http.NewGlobalHttpClient(util_http_client.AddDialContext)
+ if uploaderErr != nil {
+			uploaderErr = fmt.Errorf("error initializing the uploader: %v", uploaderErr)
+ }
+ if httpClient != nil {
+ uploader = newUploader(httpClient)
+ }
+ })
+ return uploader, uploaderErr
+}
+
+func newUploader(httpClient HTTPClient) *Uploader {
+ return &Uploader{
+ httpClient: httpClient,
+ }
}
// UploadWithRetry will retry both assigning volume request and uploading content
// The option parameter does not need to specify UploadUrl and Jwt, which will come from assigning volume.
-func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.AssignVolumeRequest, uploadOption *UploadOption, genFileUrlFn func(host, fileId string) string, reader io.Reader) (fileId string, uploadResult *UploadResult, err error, data []byte) {
+func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.AssignVolumeRequest, uploadOption *UploadOption, genFileUrlFn func(host, fileId string) string, reader io.Reader) (fileId string, uploadResult *UploadResult, err error, data []byte) {
doUploadFunc := func() error {
var host string
@@ -114,7 +134,7 @@ func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.A
uploadOption.Jwt = auth
var uploadErr error
- uploadResult, uploadErr, data = doUpload(reader, uploadOption)
+ uploadResult, uploadErr, data = uploader.doUpload(reader, uploadOption)
return uploadErr
}
if uploadOption.RetryForever {
@@ -130,21 +150,19 @@ func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.A
return
}
-var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "")
-
// UploadData sends a POST request to a volume server to upload the content with an adjustable compression level
-func UploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
- uploadResult, err = retriedUploadData(data, option)
+func (uploader *Uploader) UploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
+ uploadResult, err = uploader.retriedUploadData(data, option)
return
}
// Upload sends a POST request to a volume server to upload the content with fast compression
-func Upload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
- uploadResult, err, data = doUpload(reader, option)
+func (uploader *Uploader) Upload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
+ uploadResult, err, data = uploader.doUpload(reader, option)
return
}
-func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
+func (uploader *Uploader) doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
bytesReader, ok := reader.(*util.BytesReader)
if ok {
data = bytesReader.Bytes
@@ -155,16 +173,16 @@ func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResul
return
}
}
- uploadResult, uploadErr := retriedUploadData(data, option)
+ uploadResult, uploadErr := uploader.retriedUploadData(data, option)
return uploadResult, uploadErr, data
}
-func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
+func (uploader *Uploader) retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
for i := 0; i < 3; i++ {
if i > 0 {
time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
}
- uploadResult, err = doUploadData(data, option)
+ uploadResult, err = uploader.doUploadData(data, option)
if err == nil {
uploadResult.RetryCount = i
return
@@ -174,7 +192,7 @@ func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadR
return
}
-func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
+func (uploader *Uploader) doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
contentIsGzipped := option.IsInputCompressed
shouldGzipNow := false
if !option.IsInputCompressed {
@@ -230,7 +248,7 @@ func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult
}
// upload data
- uploadResult, err = upload_content(func(w io.Writer) (err error) {
+ uploadResult, err = uploader.upload_content(func(w io.Writer) (err error) {
_, err = w.Write(encryptedData)
return
}, len(encryptedData), &UploadOption{
@@ -251,7 +269,7 @@ func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult
uploadResult.Size = uint32(clearDataLen)
} else {
// upload data
- uploadResult, err = upload_content(func(w io.Writer) (err error) {
+ uploadResult, err = uploader.upload_content(func(w io.Writer) (err error) {
_, err = w.Write(data)
return
}, len(data), &UploadOption{
@@ -277,7 +295,7 @@ func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult
return uploadResult, err
}
-func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) {
+func (uploader *Uploader) upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) {
var body_writer *multipart.Writer
var reqReader *bytes.Reader
var buf *bytebufferpool.ByteBuffer
@@ -325,7 +343,7 @@ func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize
} else {
reqReader = bytes.NewReader(option.BytesBuffer.Bytes())
}
- req, postErr := http.NewRequest("POST", option.UploadUrl, reqReader)
+ req, postErr := http.NewRequest(http.MethodPost, option.UploadUrl, reqReader)
if postErr != nil {
glog.V(1).Infof("create upload request %s: %v", option.UploadUrl, postErr)
return nil, fmt.Errorf("create upload request %s: %v", option.UploadUrl, postErr)
@@ -338,15 +356,15 @@ func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize
req.Header.Set("Authorization", "BEARER "+string(option.Jwt))
}
// print("+")
- resp, post_err := HttpClient.Do(req)
- defer util.CloseResponse(resp)
+ resp, post_err := uploader.httpClient.Do(req)
+ defer util_http.CloseResponse(resp)
if post_err != nil {
if strings.Contains(post_err.Error(), "connection reset by peer") ||
strings.Contains(post_err.Error(), "use of closed network connection") {
glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr)
stats.FilerHandlerCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc()
- resp, post_err = HttpClient.Do(req)
- defer util.CloseResponse(resp)
+ resp, post_err = uploader.httpClient.Do(req)
+ defer util_http.CloseResponse(resp)
}
}
if post_err != nil {
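Note the concurrency contract: `NewUploader` memoizes both the instance and the construction error under a `sync.Once`, so all callers share one HTTP client, and a failed first construction is sticky; later calls return the same error without retrying. A standalone illustration of that once-guarded shape (toy names, not from this change):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	instance *string
	initErr  error
	once     sync.Once
)

func get() (*string, error) {
	once.Do(func() {
		// Pretend construction fails; the error is remembered forever.
		initErr = errors.New("construction failed")
	})
	return instance, initErr
}

func main() {
	for i := 0; i < 2; i++ {
		v, err := get()
		fmt.Println(v, err) // prints "<nil> construction failed" both times
	}
}
```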
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index b0829163c..7e948f0dc 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -54,6 +54,9 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
+ rpc TraverseBfsMetadata (TraverseBfsMetadataRequest) returns (stream TraverseBfsMetadataResponse) {
+ }
+
rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
}
@@ -218,6 +221,7 @@ message DeleteEntryRequest {
bool ignore_recursive_error = 6;
bool is_from_other_cluster = 7;
repeated int32 signatures = 8;
+ int64 if_not_modified_after = 9;
}
message DeleteEntryResponse {
@@ -341,6 +345,8 @@ message GetFilerConfigurationResponse {
string version = 11;
string cluster_id = 12;
string filer_group = 13;
+ int32 major_version = 14;
+ int32 minor_version = 15;
}
message SubscribeMetadataRequest {
@@ -360,6 +366,15 @@ message SubscribeMetadataResponse {
int64 ts_ns = 3;
}
+message TraverseBfsMetadataRequest {
+ string directory = 1;
+ repeated string excluded_prefixes = 2;
+}
+message TraverseBfsMetadataResponse {
+ string directory = 1;
+ Entry entry = 2;
+}
+
message LogEntry {
int64 ts_ns = 1;
int32 partition_key_hash = 2;
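The new server-streaming `TraverseBfsMetadata` RPC lets a client walk a directory tree breadth-first. A hedged consumer sketch, assuming the standard protoc-gen-go-grpc client surface for this service (the `traverse` helper and the excluded prefix are illustrative only):

```go
package example

import (
	"context"
	"io"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// traverse streams metadata under dir; dialing the filer is omitted here.
func traverse(ctx context.Context, client filer_pb.SeaweedFilerClient, dir string) error {
	stream, err := client.TraverseBfsMetadata(ctx, &filer_pb.TraverseBfsMetadataRequest{
		Directory:        dir,
		ExcludedPrefixes: []string{".uploads"}, // illustrative
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		_ = resp.GetDirectory() // parent directory of the entry
		_ = resp.GetEntry()     // the entry itself
	}
}
```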
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 617c13c55..93e4ffd8a 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -1333,6 +1333,7 @@ type DeleteEntryRequest struct {
IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"`
IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"`
Signatures []int32 `protobuf:"varint,8,rep,packed,name=signatures,proto3" json:"signatures,omitempty"`
+ IfNotModifiedAfter int64 `protobuf:"varint,9,opt,name=if_not_modified_after,json=ifNotModifiedAfter,proto3" json:"if_not_modified_after,omitempty"`
}
func (x *DeleteEntryRequest) Reset() {
@@ -1416,6 +1417,13 @@ func (x *DeleteEntryRequest) GetSignatures() []int32 {
return nil
}
+func (x *DeleteEntryRequest) GetIfNotModifiedAfter() int64 {
+ if x != nil {
+ return x.IfNotModifiedAfter
+ }
+ return 0
+}
+
type DeleteEntryResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2681,6 +2689,8 @@ type GetFilerConfigurationResponse struct {
Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
ClusterId string `protobuf:"bytes,12,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
FilerGroup string `protobuf:"bytes,13,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"`
+ MajorVersion int32 `protobuf:"varint,14,opt,name=major_version,json=majorVersion,proto3" json:"major_version,omitempty"`
+ MinorVersion int32 `protobuf:"varint,15,opt,name=minor_version,json=minorVersion,proto3" json:"minor_version,omitempty"`
}
func (x *GetFilerConfigurationResponse) Reset() {
@@ -2799,6 +2809,20 @@ func (x *GetFilerConfigurationResponse) GetFilerGroup() string {
return ""
}
+func (x *GetFilerConfigurationResponse) GetMajorVersion() int32 {
+ if x != nil {
+ return x.MajorVersion
+ }
+ return 0
+}
+
+func (x *GetFilerConfigurationResponse) GetMinorVersion() int32 {
+ if x != nil {
+ return x.MinorVersion
+ }
+ return 0
+}
+
type SubscribeMetadataRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2973,6 +2997,116 @@ func (x *SubscribeMetadataResponse) GetTsNs() int64 {
return 0
}
+type TraverseBfsMetadataRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ ExcludedPrefixes []string `protobuf:"bytes,2,rep,name=excluded_prefixes,json=excludedPrefixes,proto3" json:"excluded_prefixes,omitempty"`
+}
+
+func (x *TraverseBfsMetadataRequest) Reset() {
+ *x = TraverseBfsMetadataRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraverseBfsMetadataRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraverseBfsMetadataRequest) ProtoMessage() {}
+
+func (x *TraverseBfsMetadataRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraverseBfsMetadataRequest.ProtoReflect.Descriptor instead.
+func (*TraverseBfsMetadataRequest) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{43}
+}
+
+func (x *TraverseBfsMetadataRequest) GetDirectory() string {
+ if x != nil {
+ return x.Directory
+ }
+ return ""
+}
+
+func (x *TraverseBfsMetadataRequest) GetExcludedPrefixes() []string {
+ if x != nil {
+ return x.ExcludedPrefixes
+ }
+ return nil
+}
+
+type TraverseBfsMetadataResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"`
+ Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *TraverseBfsMetadataResponse) Reset() {
+ *x = TraverseBfsMetadataResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_filer_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraverseBfsMetadataResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraverseBfsMetadataResponse) ProtoMessage() {}
+
+func (x *TraverseBfsMetadataResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_filer_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraverseBfsMetadataResponse.ProtoReflect.Descriptor instead.
+func (*TraverseBfsMetadataResponse) Descriptor() ([]byte, []int) {
+ return file_filer_proto_rawDescGZIP(), []int{44}
+}
+
+func (x *TraverseBfsMetadataResponse) GetDirectory() string {
+ if x != nil {
+ return x.Directory
+ }
+ return ""
+}
+
+func (x *TraverseBfsMetadataResponse) GetEntry() *Entry {
+ if x != nil {
+ return x.Entry
+ }
+ return nil
+}
+
type LogEntry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2987,7 +3121,7 @@ type LogEntry struct {
func (x *LogEntry) Reset() {
*x = LogEntry{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[43]
+ mi := &file_filer_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3000,7 +3134,7 @@ func (x *LogEntry) String() string {
func (*LogEntry) ProtoMessage() {}
func (x *LogEntry) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[43]
+ mi := &file_filer_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3013,7 +3147,7 @@ func (x *LogEntry) ProtoReflect() protoreflect.Message {
// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead.
func (*LogEntry) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{43}
+ return file_filer_proto_rawDescGZIP(), []int{45}
}
func (x *LogEntry) GetTsNs() int64 {
@@ -3057,7 +3191,7 @@ type KeepConnectedRequest struct {
func (x *KeepConnectedRequest) Reset() {
*x = KeepConnectedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[44]
+ mi := &file_filer_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3070,7 +3204,7 @@ func (x *KeepConnectedRequest) String() string {
func (*KeepConnectedRequest) ProtoMessage() {}
func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[44]
+ mi := &file_filer_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3083,7 +3217,7 @@ func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead.
func (*KeepConnectedRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{44}
+ return file_filer_proto_rawDescGZIP(), []int{46}
}
func (x *KeepConnectedRequest) GetName() string {
@@ -3116,7 +3250,7 @@ type KeepConnectedResponse struct {
func (x *KeepConnectedResponse) Reset() {
*x = KeepConnectedResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[45]
+ mi := &file_filer_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3129,7 +3263,7 @@ func (x *KeepConnectedResponse) String() string {
func (*KeepConnectedResponse) ProtoMessage() {}
func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[45]
+ mi := &file_filer_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3142,7 +3276,7 @@ func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead.
func (*KeepConnectedResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{45}
+ return file_filer_proto_rawDescGZIP(), []int{47}
}
type LocateBrokerRequest struct {
@@ -3156,7 +3290,7 @@ type LocateBrokerRequest struct {
func (x *LocateBrokerRequest) Reset() {
*x = LocateBrokerRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[46]
+ mi := &file_filer_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3169,7 +3303,7 @@ func (x *LocateBrokerRequest) String() string {
func (*LocateBrokerRequest) ProtoMessage() {}
func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[46]
+ mi := &file_filer_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3182,7 +3316,7 @@ func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead.
func (*LocateBrokerRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{46}
+ return file_filer_proto_rawDescGZIP(), []int{48}
}
func (x *LocateBrokerRequest) GetResource() string {
@@ -3204,7 +3338,7 @@ type LocateBrokerResponse struct {
func (x *LocateBrokerResponse) Reset() {
*x = LocateBrokerResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[47]
+ mi := &file_filer_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3217,7 +3351,7 @@ func (x *LocateBrokerResponse) String() string {
func (*LocateBrokerResponse) ProtoMessage() {}
func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[47]
+ mi := &file_filer_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3230,7 +3364,7 @@ func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use LocateBrokerResponse.ProtoReflect.Descriptor instead.
func (*LocateBrokerResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{47}
+ return file_filer_proto_rawDescGZIP(), []int{49}
}
func (x *LocateBrokerResponse) GetFound() bool {
@@ -3261,7 +3395,7 @@ type KvGetRequest struct {
func (x *KvGetRequest) Reset() {
*x = KvGetRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[48]
+ mi := &file_filer_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3274,7 +3408,7 @@ func (x *KvGetRequest) String() string {
func (*KvGetRequest) ProtoMessage() {}
func (x *KvGetRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[48]
+ mi := &file_filer_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3287,7 +3421,7 @@ func (x *KvGetRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use KvGetRequest.ProtoReflect.Descriptor instead.
func (*KvGetRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{48}
+ return file_filer_proto_rawDescGZIP(), []int{50}
}
func (x *KvGetRequest) GetKey() []byte {
@@ -3309,7 +3443,7 @@ type KvGetResponse struct {
func (x *KvGetResponse) Reset() {
*x = KvGetResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[49]
+ mi := &file_filer_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3322,7 +3456,7 @@ func (x *KvGetResponse) String() string {
func (*KvGetResponse) ProtoMessage() {}
func (x *KvGetResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[49]
+ mi := &file_filer_proto_msgTypes[51]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3335,7 +3469,7 @@ func (x *KvGetResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use KvGetResponse.ProtoReflect.Descriptor instead.
func (*KvGetResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{49}
+ return file_filer_proto_rawDescGZIP(), []int{51}
}
func (x *KvGetResponse) GetValue() []byte {
@@ -3364,7 +3498,7 @@ type KvPutRequest struct {
func (x *KvPutRequest) Reset() {
*x = KvPutRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[50]
+ mi := &file_filer_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3377,7 +3511,7 @@ func (x *KvPutRequest) String() string {
func (*KvPutRequest) ProtoMessage() {}
func (x *KvPutRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[50]
+ mi := &file_filer_proto_msgTypes[52]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3390,7 +3524,7 @@ func (x *KvPutRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use KvPutRequest.ProtoReflect.Descriptor instead.
func (*KvPutRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{50}
+ return file_filer_proto_rawDescGZIP(), []int{52}
}
func (x *KvPutRequest) GetKey() []byte {
@@ -3418,7 +3552,7 @@ type KvPutResponse struct {
func (x *KvPutResponse) Reset() {
*x = KvPutResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[51]
+ mi := &file_filer_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3431,7 +3565,7 @@ func (x *KvPutResponse) String() string {
func (*KvPutResponse) ProtoMessage() {}
func (x *KvPutResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[51]
+ mi := &file_filer_proto_msgTypes[53]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3444,7 +3578,7 @@ func (x *KvPutResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use KvPutResponse.ProtoReflect.Descriptor instead.
func (*KvPutResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{51}
+ return file_filer_proto_rawDescGZIP(), []int{53}
}
func (x *KvPutResponse) GetError() string {
@@ -3469,7 +3603,7 @@ type FilerConf struct {
func (x *FilerConf) Reset() {
*x = FilerConf{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[52]
+ mi := &file_filer_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3482,7 +3616,7 @@ func (x *FilerConf) String() string {
func (*FilerConf) ProtoMessage() {}
func (x *FilerConf) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[52]
+ mi := &file_filer_proto_msgTypes[54]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3495,7 +3629,7 @@ func (x *FilerConf) ProtoReflect() protoreflect.Message {
// Deprecated: Use FilerConf.ProtoReflect.Descriptor instead.
func (*FilerConf) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{52}
+ return file_filer_proto_rawDescGZIP(), []int{54}
}
func (x *FilerConf) GetVersion() int32 {
@@ -3527,7 +3661,7 @@ type CacheRemoteObjectToLocalClusterRequest struct {
func (x *CacheRemoteObjectToLocalClusterRequest) Reset() {
*x = CacheRemoteObjectToLocalClusterRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[53]
+ mi := &file_filer_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3540,7 +3674,7 @@ func (x *CacheRemoteObjectToLocalClusterRequest) String() string {
func (*CacheRemoteObjectToLocalClusterRequest) ProtoMessage() {}
func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[53]
+ mi := &file_filer_proto_msgTypes[55]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3553,7 +3687,7 @@ func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Mes
// Deprecated: Use CacheRemoteObjectToLocalClusterRequest.ProtoReflect.Descriptor instead.
func (*CacheRemoteObjectToLocalClusterRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{53}
+ return file_filer_proto_rawDescGZIP(), []int{55}
}
func (x *CacheRemoteObjectToLocalClusterRequest) GetDirectory() string {
@@ -3581,7 +3715,7 @@ type CacheRemoteObjectToLocalClusterResponse struct {
func (x *CacheRemoteObjectToLocalClusterResponse) Reset() {
*x = CacheRemoteObjectToLocalClusterResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[54]
+ mi := &file_filer_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3594,7 +3728,7 @@ func (x *CacheRemoteObjectToLocalClusterResponse) String() string {
func (*CacheRemoteObjectToLocalClusterResponse) ProtoMessage() {}
func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[54]
+ mi := &file_filer_proto_msgTypes[56]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3607,7 +3741,7 @@ func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Me
// Deprecated: Use CacheRemoteObjectToLocalClusterResponse.ProtoReflect.Descriptor instead.
func (*CacheRemoteObjectToLocalClusterResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{54}
+ return file_filer_proto_rawDescGZIP(), []int{56}
}
func (x *CacheRemoteObjectToLocalClusterResponse) GetEntry() *Entry {
@@ -3635,7 +3769,7 @@ type LockRequest struct {
func (x *LockRequest) Reset() {
*x = LockRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[55]
+ mi := &file_filer_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3648,7 +3782,7 @@ func (x *LockRequest) String() string {
func (*LockRequest) ProtoMessage() {}
func (x *LockRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[55]
+ mi := &file_filer_proto_msgTypes[57]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3661,7 +3795,7 @@ func (x *LockRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use LockRequest.ProtoReflect.Descriptor instead.
func (*LockRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{55}
+ return file_filer_proto_rawDescGZIP(), []int{57}
}
func (x *LockRequest) GetName() string {
@@ -3713,7 +3847,7 @@ type LockResponse struct {
func (x *LockResponse) Reset() {
*x = LockResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[56]
+ mi := &file_filer_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3726,7 +3860,7 @@ func (x *LockResponse) String() string {
func (*LockResponse) ProtoMessage() {}
func (x *LockResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[56]
+ mi := &file_filer_proto_msgTypes[58]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3739,7 +3873,7 @@ func (x *LockResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use LockResponse.ProtoReflect.Descriptor instead.
func (*LockResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{56}
+ return file_filer_proto_rawDescGZIP(), []int{58}
}
func (x *LockResponse) GetRenewToken() string {
@@ -3783,7 +3917,7 @@ type UnlockRequest struct {
func (x *UnlockRequest) Reset() {
*x = UnlockRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[57]
+ mi := &file_filer_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3796,7 +3930,7 @@ func (x *UnlockRequest) String() string {
func (*UnlockRequest) ProtoMessage() {}
func (x *UnlockRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[57]
+ mi := &file_filer_proto_msgTypes[59]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3809,7 +3943,7 @@ func (x *UnlockRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UnlockRequest.ProtoReflect.Descriptor instead.
func (*UnlockRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{57}
+ return file_filer_proto_rawDescGZIP(), []int{59}
}
func (x *UnlockRequest) GetName() string {
@@ -3845,7 +3979,7 @@ type UnlockResponse struct {
func (x *UnlockResponse) Reset() {
*x = UnlockResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[58]
+ mi := &file_filer_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3858,7 +3992,7 @@ func (x *UnlockResponse) String() string {
func (*UnlockResponse) ProtoMessage() {}
func (x *UnlockResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[58]
+ mi := &file_filer_proto_msgTypes[60]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3871,7 +4005,7 @@ func (x *UnlockResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use UnlockResponse.ProtoReflect.Descriptor instead.
func (*UnlockResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{58}
+ return file_filer_proto_rawDescGZIP(), []int{60}
}
func (x *UnlockResponse) GetError() string {
@@ -3900,7 +4034,7 @@ type FindLockOwnerRequest struct {
func (x *FindLockOwnerRequest) Reset() {
*x = FindLockOwnerRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[59]
+ mi := &file_filer_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3913,7 +4047,7 @@ func (x *FindLockOwnerRequest) String() string {
func (*FindLockOwnerRequest) ProtoMessage() {}
func (x *FindLockOwnerRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[59]
+ mi := &file_filer_proto_msgTypes[61]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3926,7 +4060,7 @@ func (x *FindLockOwnerRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use FindLockOwnerRequest.ProtoReflect.Descriptor instead.
func (*FindLockOwnerRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{59}
+ return file_filer_proto_rawDescGZIP(), []int{61}
}
func (x *FindLockOwnerRequest) GetName() string {
@@ -3954,7 +4088,7 @@ type FindLockOwnerResponse struct {
func (x *FindLockOwnerResponse) Reset() {
*x = FindLockOwnerResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[60]
+ mi := &file_filer_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3967,7 +4101,7 @@ func (x *FindLockOwnerResponse) String() string {
func (*FindLockOwnerResponse) ProtoMessage() {}
func (x *FindLockOwnerResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[60]
+ mi := &file_filer_proto_msgTypes[62]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3980,7 +4114,7 @@ func (x *FindLockOwnerResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use FindLockOwnerResponse.ProtoReflect.Descriptor instead.
func (*FindLockOwnerResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{60}
+ return file_filer_proto_rawDescGZIP(), []int{62}
}
func (x *FindLockOwnerResponse) GetOwner() string {
@@ -4004,7 +4138,7 @@ type Lock struct {
func (x *Lock) Reset() {
*x = Lock{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[61]
+ mi := &file_filer_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4017,7 +4151,7 @@ func (x *Lock) String() string {
func (*Lock) ProtoMessage() {}
func (x *Lock) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[61]
+ mi := &file_filer_proto_msgTypes[63]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4030,7 +4164,7 @@ func (x *Lock) ProtoReflect() protoreflect.Message {
// Deprecated: Use Lock.ProtoReflect.Descriptor instead.
func (*Lock) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{61}
+ return file_filer_proto_rawDescGZIP(), []int{63}
}
func (x *Lock) GetName() string {
@@ -4072,7 +4206,7 @@ type TransferLocksRequest struct {
func (x *TransferLocksRequest) Reset() {
*x = TransferLocksRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[62]
+ mi := &file_filer_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4085,7 +4219,7 @@ func (x *TransferLocksRequest) String() string {
func (*TransferLocksRequest) ProtoMessage() {}
func (x *TransferLocksRequest) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[62]
+ mi := &file_filer_proto_msgTypes[64]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4098,7 +4232,7 @@ func (x *TransferLocksRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use TransferLocksRequest.ProtoReflect.Descriptor instead.
func (*TransferLocksRequest) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{62}
+ return file_filer_proto_rawDescGZIP(), []int{64}
}
func (x *TransferLocksRequest) GetLocks() []*Lock {
@@ -4117,7 +4251,7 @@ type TransferLocksResponse struct {
func (x *TransferLocksResponse) Reset() {
*x = TransferLocksResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[63]
+ mi := &file_filer_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4130,7 +4264,7 @@ func (x *TransferLocksResponse) String() string {
func (*TransferLocksResponse) ProtoMessage() {}
func (x *TransferLocksResponse) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[63]
+ mi := &file_filer_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4143,7 +4277,7 @@ func (x *TransferLocksResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use TransferLocksResponse.ProtoReflect.Descriptor instead.
func (*TransferLocksResponse) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{63}
+ return file_filer_proto_rawDescGZIP(), []int{65}
}
// if found, send the exact address
@@ -4160,7 +4294,7 @@ type LocateBrokerResponse_Resource struct {
func (x *LocateBrokerResponse_Resource) Reset() {
*x = LocateBrokerResponse_Resource{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[66]
+ mi := &file_filer_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4173,7 +4307,7 @@ func (x *LocateBrokerResponse_Resource) String() string {
func (*LocateBrokerResponse_Resource) ProtoMessage() {}
func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[66]
+ mi := &file_filer_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4186,7 +4320,7 @@ func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message {
// Deprecated: Use LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead.
func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{47, 0}
+ return file_filer_proto_rawDescGZIP(), []int{49, 0}
}
func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string {
@@ -4226,7 +4360,7 @@ type FilerConf_PathConf struct {
func (x *FilerConf_PathConf) Reset() {
*x = FilerConf_PathConf{}
if protoimpl.UnsafeEnabled {
- mi := &file_filer_proto_msgTypes[67]
+ mi := &file_filer_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4239,7 +4373,7 @@ func (x *FilerConf_PathConf) String() string {
func (*FilerConf_PathConf) ProtoMessage() {}
func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message {
- mi := &file_filer_proto_msgTypes[67]
+ mi := &file_filer_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4252,7 +4386,7 @@ func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message {
// Deprecated: Use FilerConf_PathConf.ProtoReflect.Descriptor instead.
func (*FilerConf_PathConf) Descriptor() ([]byte, []int) {
- return file_filer_proto_rawDescGZIP(), []int{52, 0}
+ return file_filer_proto_rawDescGZIP(), []int{54, 0}
}
func (x *FilerConf_PathConf) GetLocationPrefix() string {
@@ -4536,7 +4670,7 @@ var file_filer_proto_rawDesc = []byte{
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68,
0x75, 0x6e, 0x6b, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x02,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xcb, 0x02,
0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
@@ -4554,482 +4688,509 @@ var file_filer_proto_rawDesc = []byte{
0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65,
0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xba, 0x01, 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63,
- 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
- 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69,
- 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61,
- 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
- 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69,
- 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61,
- 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
- 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61,
- 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0xba, 0x01, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d,
- 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d,
- 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05,
- 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x9a, 0x01, 0x0a,
- 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69,
- 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64,
- 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x13, 0x41, 0x73,
- 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c,
- 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c,
- 0x5f, 0x73, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53,
- 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65,
- 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e,
- 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64,
- 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73,
- 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e,
- 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17,
- 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a,
- 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74,
- 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x08, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x69,
- 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x13, 0x4c, 0x6f, 0x6f,
- 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22,
- 0x3d, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x09,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x79,
- 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72,
- 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a,
- 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x67,
- 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08,
- 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61,
- 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64,
- 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 0x6f,
- 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f,
- 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x20, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c,
- 0x75, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73,
- 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76,
- 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x50,
- 0x0a, 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
- 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
- 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74,
- 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a,
- 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74,
- 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f,
- 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53,
- 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65,
- 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22,
- 0x46, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
- 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x72,
- 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4e,
- 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x54, 0x69, 0x6d,
- 0x65, 0x4e, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x22, 0x9e, 0x03, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12,
- 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, 0x72, 0x5f,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64,
- 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x69, 0x70,
- 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, 0x70, 0x68, 0x65,
- 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12,
- 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65,
- 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
- 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x65, 0x74, 0x72,
- 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49,
- 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f,
- 0x75, 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x47,
- 0x72, 0x6f, 0x75, 0x70, 0x22, 0xb7, 0x02, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
- 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61,
- 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x23, 0x0a, 0x0d,
- 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x06, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65,
- 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x19,
- 0x0a, 0x08, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x07, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x4e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b,
- 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x0b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x22, 0x9a,
- 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09,
- 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76,
- 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
- 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x73, 0x0a, 0x08, 0x4c,
- 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12,
- 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61,
- 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
- 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
- 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x4b, 0x65, 0x65, 0x70, 0x43,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72,
- 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75,
- 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
- 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58, 0x0a, 0x08, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64,
- 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67,
- 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
- 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4b, 0x76,
- 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x22, 0xa4, 0x04, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x12,
- 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66,
- 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e,
- 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc0, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f,
- 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
- 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a,
- 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12,
- 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x66, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x73, 0x79,
- 0x6e, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x67, 0x72, 0x6f,
- 0x77, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12,
- 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x09,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72,
- 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64,
- 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64,
- 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61,
- 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x11, 0x6d, 0x61, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x68,
- 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x14, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x26, 0x43, 0x61, 0x63, 0x68,
- 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
- 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x27, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d,
- 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
- 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x9b, 0x01, 0x0a, 0x0b, 0x4c, 0x6f, 0x63, 0x6b, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x65,
- 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0d, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x54, 0x6f, 0x4c, 0x6f,
- 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x12, 0x14,
- 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f,
- 0x77, 0x6e, 0x65, 0x72, 0x22, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x5f, 0x74,
- 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6e, 0x65,
- 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f,
- 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x6b,
- 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x6f,
- 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x6f, 0x76, 0x65, 0x64,
- 0x54, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x5f, 0x0a, 0x0d, 0x55, 0x6e, 0x6c, 0x6f,
- 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a,
- 0x0b, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x19,
- 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x07, 0x69, 0x73, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x41, 0x0a, 0x0e, 0x55, 0x6e, 0x6c,
- 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x54, 0x6f, 0x22, 0x45, 0x0a, 0x14,
- 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6d,
- 0x6f, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4d, 0x6f,
- 0x76, 0x65, 0x64, 0x22, 0x2d, 0x0a, 0x15, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f,
- 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e,
- 0x65, 0x72, 0x22, 0x75, 0x0a, 0x04, 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f,
- 0x0a, 0x0b, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
- 0x22, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x41,
- 0x74, 0x4e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x3c, 0x0a, 0x14, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x24, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x0e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b,
- 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x32, 0x8f, 0x10, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65,
- 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69,
- 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64,
- 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d,
- 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e,
- 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52,
- 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x66, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x66, 0x74, 0x65,
+ 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x69, 0x66, 0x4e, 0x6f, 0x74, 0x4d, 0x6f,
+ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x22, 0x2b, 0x0a, 0x13, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xba, 0x01, 0x0a, 0x18, 0x41, 0x74, 0x6f,
0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e,
- 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
- 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x72,
- 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4f, 0x0a, 0x0c, 0x41, 0x73,
- 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c,
- 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75,
- 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x4c,
- 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69,
- 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c,
- 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c,
- 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75,
- 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e,
- 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1f,
- 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c,
- 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
- 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c,
- 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b,
- 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73,
- 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69,
- 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x04, 0x50,
- 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50,
- 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x66, 0x69, 0x6c,
- 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e,
- 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65,
- 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
- 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x60, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
- 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65,
- 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x30, 0x01, 0x12, 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66,
- 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
- 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x47,
- 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76,
- 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c,
- 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x12, 0x16,
- 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
- 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x88, 0x01, 0x0a, 0x1f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c,
+ 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c,
+ 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c,
+ 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65,
+ 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65,
+ 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65,
+ 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x23, 0x0a, 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22,
+ 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a,
+ 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x89, 0x02, 0x0a,
+ 0x13, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07,
+ 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74,
+ 0x74, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65,
+ 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61,
+ 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61,
+ 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b,
+ 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64,
+ 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x41, 0x73, 0x73,
+ 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x61, 0x75, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x08,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x13,
+ 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49,
+ 0x64, 0x73, 0x22, 0x3d, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x30, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0x79, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a,
+ 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b,
+ 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x64,
+ 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0xc3, 0x01, 0x0a,
+ 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x1a, 0x54, 0x0a, 0x11,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x22, 0x20, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69,
+ 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65,
+ 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+ 0x73, 0x22, 0x50, 0x0a, 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x63,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e,
+ 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a,
+ 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x53,
+ 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70,
+ 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74,
+ 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x22, 0x46, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72,
+ 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x0c, 0x50, 0x69,
+ 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x24,
+ 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70,
+ 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c,
+ 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xe8, 0x03, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69,
+ 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74,
+ 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64,
+ 0x69, 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x64, 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06,
+ 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69,
+ 0x70, 0x68, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f,
+ 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x18, 0x0a,
+ 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x6a, 0x6f, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d,
+ 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x22, 0xb7, 0x02, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x74,
+ 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x1b,
+ 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75,
+ 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x75,
+ 0x6e, 0x74, 0x69, 0x6c, 0x4e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x22, 0x9a, 0x01, 0x0a, 0x19,
+ 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x67, 0x0a, 0x1a, 0x54, 0x72, 0x61, 0x76,
+ 0x65, 0x72, 0x73, 0x65, 0x42, 0x66, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65,
+ 0x73, 0x22, 0x62, 0x0a, 0x1b, 0x54, 0x72, 0x61, 0x76, 0x65, 0x72, 0x73, 0x65, 0x42, 0x66, 0x73,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25,
+ 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05,
+ 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x73, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79,
+ 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65,
+ 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70,
+ 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50,
+ 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x73, 0x22, 0x17, 0x0a, 0x15, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f,
+ 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01,
+ 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
+ 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x1a, 0x58, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a,
+ 0x0c, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22,
+ 0x3b, 0x0a, 0x0d, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c,
+ 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xa4, 0x04, 0x0a, 0x09,
+ 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
+ 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68,
+ 0x43, 0x6f, 0x6e, 0x66, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
+ 0xc0, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73,
+ 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69,
+ 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x0a, 0x13,
+ 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x65, 0x47, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74,
+ 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61,
+ 0x63, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b,
+ 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x6d,
+ 0x61, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x46, 0x69,
+ 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x34, 0x0a, 0x16,
+ 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x64, 0x69,
+ 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x26, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
- 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09,
+ 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50,
+ 0x0a, 0x27, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79,
+ 0x22, 0x9b, 0x01, 0x0a, 0x0b, 0x4c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f,
+ 0x74, 0x6f, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x73,
+ 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x72, 0x65, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x19, 0x0a,
+ 0x08, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x07, 0x69, 0x73, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65,
+ 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x91,
+ 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12,
+ 0x2b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x76,
+ 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x54, 0x6f, 0x12, 0x14, 0x0a, 0x05,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x22, 0x5f, 0x0a, 0x0d, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6e, 0x65, 0x77,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65,
+ 0x6e, 0x65, 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6d,
+ 0x6f, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4d, 0x6f,
+ 0x76, 0x65, 0x64, 0x22, 0x41, 0x0a, 0x0e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6d,
+ 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d,
+ 0x6f, 0x76, 0x65, 0x64, 0x54, 0x6f, 0x22, 0x45, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f,
+ 0x63, 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2d, 0x0a,
+ 0x15, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x75, 0x0a, 0x04,
+ 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6e, 0x65,
+ 0x77, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72,
+ 0x65, 0x6e, 0x65, 0x77, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x12, 0x14, 0x0a,
+ 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77,
+ 0x6e, 0x65, 0x72, 0x22, 0x3c, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c,
+ 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x73, 0x22, 0x17, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63,
+ 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf7, 0x10, 0x0a, 0x0c, 0x53,
+ 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c,
+ 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c,
+ 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72,
+ 0x69, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x52, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70,
+ 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70,
+ 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69,
+ 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69,
+ 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x30, 0x01, 0x12, 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41,
+ 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69,
+ 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a,
+ 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74,
+ 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
+ 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a,
+ 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69,
+ 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x54, 0x72,
+ 0x61, 0x76, 0x65, 0x72, 0x73, 0x65, 0x42, 0x66, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x24, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61,
+ 0x76, 0x65, 0x72, 0x73, 0x65, 0x42, 0x66, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x76, 0x65, 0x72, 0x73, 0x65, 0x42, 0x66, 0x73, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x30, 0x01, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69,
+ 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
+ 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22,
+ 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
+ 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75,
+ 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x05, 0x4b,
+ 0x76, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+ 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66,
+ 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74,
+ 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a, 0x1f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d,
+ 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62,
0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0f,
- 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x12,
- 0x15, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
- 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x48, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x55,
- 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
- 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18,
- 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x46, 0x69,
- 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x66, 0x69,
- 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f,
- 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69,
- 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f,
- 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52,
- 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x12,
- 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e,
- 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73,
- 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
- 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x72,
- 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x66, 0x69, 0x6c, 0x65,
+ 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42,
+ 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63,
+ 0x6b, 0x12, 0x15, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63,
+ 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
+ 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x48, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+ 0x64, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
+ 0x70, 0x62, 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x6e, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d,
+ 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1e, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63,
+ 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e,
+ 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x4c, 0x6f, 0x63,
+ 0x6b, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x52, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b,
+ 0x73, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65,
+ 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c,
+ 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -5044,7 +5205,7 @@ func file_filer_proto_rawDescGZIP() []byte {
return file_filer_proto_rawDescData
}
-var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 68)
+var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 70)
var file_filer_proto_goTypes = []interface{}{
(*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
(*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
@@ -5089,38 +5250,40 @@ var file_filer_proto_goTypes = []interface{}{
(*GetFilerConfigurationResponse)(nil), // 40: filer_pb.GetFilerConfigurationResponse
(*SubscribeMetadataRequest)(nil), // 41: filer_pb.SubscribeMetadataRequest
(*SubscribeMetadataResponse)(nil), // 42: filer_pb.SubscribeMetadataResponse
- (*LogEntry)(nil), // 43: filer_pb.LogEntry
- (*KeepConnectedRequest)(nil), // 44: filer_pb.KeepConnectedRequest
- (*KeepConnectedResponse)(nil), // 45: filer_pb.KeepConnectedResponse
- (*LocateBrokerRequest)(nil), // 46: filer_pb.LocateBrokerRequest
- (*LocateBrokerResponse)(nil), // 47: filer_pb.LocateBrokerResponse
- (*KvGetRequest)(nil), // 48: filer_pb.KvGetRequest
- (*KvGetResponse)(nil), // 49: filer_pb.KvGetResponse
- (*KvPutRequest)(nil), // 50: filer_pb.KvPutRequest
- (*KvPutResponse)(nil), // 51: filer_pb.KvPutResponse
- (*FilerConf)(nil), // 52: filer_pb.FilerConf
- (*CacheRemoteObjectToLocalClusterRequest)(nil), // 53: filer_pb.CacheRemoteObjectToLocalClusterRequest
- (*CacheRemoteObjectToLocalClusterResponse)(nil), // 54: filer_pb.CacheRemoteObjectToLocalClusterResponse
- (*LockRequest)(nil), // 55: filer_pb.LockRequest
- (*LockResponse)(nil), // 56: filer_pb.LockResponse
- (*UnlockRequest)(nil), // 57: filer_pb.UnlockRequest
- (*UnlockResponse)(nil), // 58: filer_pb.UnlockResponse
- (*FindLockOwnerRequest)(nil), // 59: filer_pb.FindLockOwnerRequest
- (*FindLockOwnerResponse)(nil), // 60: filer_pb.FindLockOwnerResponse
- (*Lock)(nil), // 61: filer_pb.Lock
- (*TransferLocksRequest)(nil), // 62: filer_pb.TransferLocksRequest
- (*TransferLocksResponse)(nil), // 63: filer_pb.TransferLocksResponse
- nil, // 64: filer_pb.Entry.ExtendedEntry
- nil, // 65: filer_pb.LookupVolumeResponse.LocationsMapEntry
- (*LocateBrokerResponse_Resource)(nil), // 66: filer_pb.LocateBrokerResponse.Resource
- (*FilerConf_PathConf)(nil), // 67: filer_pb.FilerConf.PathConf
+ (*TraverseBfsMetadataRequest)(nil), // 43: filer_pb.TraverseBfsMetadataRequest
+ (*TraverseBfsMetadataResponse)(nil), // 44: filer_pb.TraverseBfsMetadataResponse
+ (*LogEntry)(nil), // 45: filer_pb.LogEntry
+ (*KeepConnectedRequest)(nil), // 46: filer_pb.KeepConnectedRequest
+ (*KeepConnectedResponse)(nil), // 47: filer_pb.KeepConnectedResponse
+ (*LocateBrokerRequest)(nil), // 48: filer_pb.LocateBrokerRequest
+ (*LocateBrokerResponse)(nil), // 49: filer_pb.LocateBrokerResponse
+ (*KvGetRequest)(nil), // 50: filer_pb.KvGetRequest
+ (*KvGetResponse)(nil), // 51: filer_pb.KvGetResponse
+ (*KvPutRequest)(nil), // 52: filer_pb.KvPutRequest
+ (*KvPutResponse)(nil), // 53: filer_pb.KvPutResponse
+ (*FilerConf)(nil), // 54: filer_pb.FilerConf
+ (*CacheRemoteObjectToLocalClusterRequest)(nil), // 55: filer_pb.CacheRemoteObjectToLocalClusterRequest
+ (*CacheRemoteObjectToLocalClusterResponse)(nil), // 56: filer_pb.CacheRemoteObjectToLocalClusterResponse
+ (*LockRequest)(nil), // 57: filer_pb.LockRequest
+ (*LockResponse)(nil), // 58: filer_pb.LockResponse
+ (*UnlockRequest)(nil), // 59: filer_pb.UnlockRequest
+ (*UnlockResponse)(nil), // 60: filer_pb.UnlockResponse
+ (*FindLockOwnerRequest)(nil), // 61: filer_pb.FindLockOwnerRequest
+ (*FindLockOwnerResponse)(nil), // 62: filer_pb.FindLockOwnerResponse
+ (*Lock)(nil), // 63: filer_pb.Lock
+ (*TransferLocksRequest)(nil), // 64: filer_pb.TransferLocksRequest
+ (*TransferLocksResponse)(nil), // 65: filer_pb.TransferLocksResponse
+ nil, // 66: filer_pb.Entry.ExtendedEntry
+ nil, // 67: filer_pb.LookupVolumeResponse.LocationsMapEntry
+ (*LocateBrokerResponse_Resource)(nil), // 68: filer_pb.LocateBrokerResponse.Resource
+ (*FilerConf_PathConf)(nil), // 69: filer_pb.FilerConf.PathConf
}
var file_filer_proto_depIdxs = []int32{
5, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
5, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
8, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
11, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
- 64, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
+ 66, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
4, // 5: filer_pb.Entry.remote_entry:type_name -> filer_pb.RemoteEntry
5, // 6: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry
5, // 7: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry
@@ -5134,67 +5297,70 @@ var file_filer_proto_depIdxs = []int32{
7, // 15: filer_pb.StreamRenameEntryResponse.event_notification:type_name -> filer_pb.EventNotification
28, // 16: filer_pb.AssignVolumeResponse.location:type_name -> filer_pb.Location
28, // 17: filer_pb.Locations.locations:type_name -> filer_pb.Location
- 65, // 18: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
+ 67, // 18: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
30, // 19: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection
7, // 20: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
- 66, // 21: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
- 67, // 22: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
- 5, // 23: filer_pb.CacheRemoteObjectToLocalClusterResponse.entry:type_name -> filer_pb.Entry
- 61, // 24: filer_pb.TransferLocksRequest.locks:type_name -> filer_pb.Lock
- 27, // 25: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
- 0, // 26: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
- 2, // 27: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
- 12, // 28: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
- 14, // 29: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
- 16, // 30: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
- 18, // 31: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
- 20, // 32: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
- 22, // 33: filer_pb.SeaweedFiler.StreamRenameEntry:input_type -> filer_pb.StreamRenameEntryRequest
- 24, // 34: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
- 26, // 35: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
- 31, // 36: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest
- 33, // 37: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
- 35, // 38: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
- 37, // 39: filer_pb.SeaweedFiler.Ping:input_type -> filer_pb.PingRequest
- 39, // 40: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
- 41, // 41: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
- 41, // 42: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
- 48, // 43: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
- 50, // 44: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
- 53, // 45: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:input_type -> filer_pb.CacheRemoteObjectToLocalClusterRequest
- 55, // 46: filer_pb.SeaweedFiler.DistributedLock:input_type -> filer_pb.LockRequest
- 57, // 47: filer_pb.SeaweedFiler.DistributedUnlock:input_type -> filer_pb.UnlockRequest
- 59, // 48: filer_pb.SeaweedFiler.FindLockOwner:input_type -> filer_pb.FindLockOwnerRequest
- 62, // 49: filer_pb.SeaweedFiler.TransferLocks:input_type -> filer_pb.TransferLocksRequest
- 1, // 50: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
- 3, // 51: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
- 13, // 52: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
- 15, // 53: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse
- 17, // 54: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse
- 19, // 55: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse
- 21, // 56: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse
- 23, // 57: filer_pb.SeaweedFiler.StreamRenameEntry:output_type -> filer_pb.StreamRenameEntryResponse
- 25, // 58: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse
- 29, // 59: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse
- 32, // 60: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse
- 34, // 61: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse
- 36, // 62: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse
- 38, // 63: filer_pb.SeaweedFiler.Ping:output_type -> filer_pb.PingResponse
- 40, // 64: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse
- 42, // 65: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse
- 42, // 66: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse
- 49, // 67: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
- 51, // 68: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
- 54, // 69: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:output_type -> filer_pb.CacheRemoteObjectToLocalClusterResponse
- 56, // 70: filer_pb.SeaweedFiler.DistributedLock:output_type -> filer_pb.LockResponse
- 58, // 71: filer_pb.SeaweedFiler.DistributedUnlock:output_type -> filer_pb.UnlockResponse
- 60, // 72: filer_pb.SeaweedFiler.FindLockOwner:output_type -> filer_pb.FindLockOwnerResponse
- 63, // 73: filer_pb.SeaweedFiler.TransferLocks:output_type -> filer_pb.TransferLocksResponse
- 50, // [50:74] is the sub-list for method output_type
- 26, // [26:50] is the sub-list for method input_type
- 26, // [26:26] is the sub-list for extension type_name
- 26, // [26:26] is the sub-list for extension extendee
- 0, // [0:26] is the sub-list for field type_name
+ 5, // 21: filer_pb.TraverseBfsMetadataResponse.entry:type_name -> filer_pb.Entry
+ 68, // 22: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
+ 69, // 23: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
+ 5, // 24: filer_pb.CacheRemoteObjectToLocalClusterResponse.entry:type_name -> filer_pb.Entry
+ 63, // 25: filer_pb.TransferLocksRequest.locks:type_name -> filer_pb.Lock
+ 27, // 26: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
+ 0, // 27: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
+ 2, // 28: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
+ 12, // 29: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
+ 14, // 30: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
+ 16, // 31: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
+ 18, // 32: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
+ 20, // 33: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
+ 22, // 34: filer_pb.SeaweedFiler.StreamRenameEntry:input_type -> filer_pb.StreamRenameEntryRequest
+ 24, // 35: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
+ 26, // 36: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
+ 31, // 37: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest
+ 33, // 38: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
+ 35, // 39: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
+ 37, // 40: filer_pb.SeaweedFiler.Ping:input_type -> filer_pb.PingRequest
+ 39, // 41: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
+ 43, // 42: filer_pb.SeaweedFiler.TraverseBfsMetadata:input_type -> filer_pb.TraverseBfsMetadataRequest
+ 41, // 43: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+ 41, // 44: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+ 50, // 45: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
+ 52, // 46: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
+ 55, // 47: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:input_type -> filer_pb.CacheRemoteObjectToLocalClusterRequest
+ 57, // 48: filer_pb.SeaweedFiler.DistributedLock:input_type -> filer_pb.LockRequest
+ 59, // 49: filer_pb.SeaweedFiler.DistributedUnlock:input_type -> filer_pb.UnlockRequest
+ 61, // 50: filer_pb.SeaweedFiler.FindLockOwner:input_type -> filer_pb.FindLockOwnerRequest
+ 64, // 51: filer_pb.SeaweedFiler.TransferLocks:input_type -> filer_pb.TransferLocksRequest
+ 1, // 52: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
+ 3, // 53: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
+ 13, // 54: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse
+ 15, // 55: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse
+ 17, // 56: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse
+ 19, // 57: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse
+ 21, // 58: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse
+ 23, // 59: filer_pb.SeaweedFiler.StreamRenameEntry:output_type -> filer_pb.StreamRenameEntryResponse
+ 25, // 60: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse
+ 29, // 61: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse
+ 32, // 62: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse
+ 34, // 63: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse
+ 36, // 64: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse
+ 38, // 65: filer_pb.SeaweedFiler.Ping:output_type -> filer_pb.PingResponse
+ 40, // 66: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse
+ 44, // 67: filer_pb.SeaweedFiler.TraverseBfsMetadata:output_type -> filer_pb.TraverseBfsMetadataResponse
+ 42, // 68: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse
+ 42, // 69: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse
+ 51, // 70: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse
+ 53, // 71: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse
+ 56, // 72: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:output_type -> filer_pb.CacheRemoteObjectToLocalClusterResponse
+ 58, // 73: filer_pb.SeaweedFiler.DistributedLock:output_type -> filer_pb.LockResponse
+ 60, // 74: filer_pb.SeaweedFiler.DistributedUnlock:output_type -> filer_pb.UnlockResponse
+ 62, // 75: filer_pb.SeaweedFiler.FindLockOwner:output_type -> filer_pb.FindLockOwnerResponse
+ 65, // 76: filer_pb.SeaweedFiler.TransferLocks:output_type -> filer_pb.TransferLocksResponse
+ 52, // [52:77] is the sub-list for method output_type
+ 27, // [27:52] is the sub-list for method input_type
+ 27, // [27:27] is the sub-list for extension type_name
+ 27, // [27:27] is the sub-list for extension extendee
+ 0, // [0:27] is the sub-list for field type_name
}
func init() { file_filer_proto_init() }
@@ -5720,7 +5886,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LogEntry); i {
+ switch v := v.(*TraverseBfsMetadataRequest); i {
case 0:
return &v.state
case 1:
@@ -5732,7 +5898,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeepConnectedRequest); i {
+ switch v := v.(*TraverseBfsMetadataResponse); i {
case 0:
return &v.state
case 1:
@@ -5744,7 +5910,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeepConnectedResponse); i {
+ switch v := v.(*LogEntry); i {
case 0:
return &v.state
case 1:
@@ -5756,7 +5922,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LocateBrokerRequest); i {
+ switch v := v.(*KeepConnectedRequest); i {
case 0:
return &v.state
case 1:
@@ -5768,7 +5934,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LocateBrokerResponse); i {
+ switch v := v.(*KeepConnectedResponse); i {
case 0:
return &v.state
case 1:
@@ -5780,7 +5946,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KvGetRequest); i {
+ switch v := v.(*LocateBrokerRequest); i {
case 0:
return &v.state
case 1:
@@ -5792,7 +5958,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KvGetResponse); i {
+ switch v := v.(*LocateBrokerResponse); i {
case 0:
return &v.state
case 1:
@@ -5804,7 +5970,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KvPutRequest); i {
+ switch v := v.(*KvGetRequest); i {
case 0:
return &v.state
case 1:
@@ -5816,7 +5982,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KvPutResponse); i {
+ switch v := v.(*KvGetResponse); i {
case 0:
return &v.state
case 1:
@@ -5828,7 +5994,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FilerConf); i {
+ switch v := v.(*KvPutRequest); i {
case 0:
return &v.state
case 1:
@@ -5840,7 +6006,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CacheRemoteObjectToLocalClusterRequest); i {
+ switch v := v.(*KvPutResponse); i {
case 0:
return &v.state
case 1:
@@ -5852,7 +6018,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CacheRemoteObjectToLocalClusterResponse); i {
+ switch v := v.(*FilerConf); i {
case 0:
return &v.state
case 1:
@@ -5864,7 +6030,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LockRequest); i {
+ switch v := v.(*CacheRemoteObjectToLocalClusterRequest); i {
case 0:
return &v.state
case 1:
@@ -5876,7 +6042,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LockResponse); i {
+ switch v := v.(*CacheRemoteObjectToLocalClusterResponse); i {
case 0:
return &v.state
case 1:
@@ -5888,7 +6054,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UnlockRequest); i {
+ switch v := v.(*LockRequest); i {
case 0:
return &v.state
case 1:
@@ -5900,7 +6066,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UnlockResponse); i {
+ switch v := v.(*LockResponse); i {
case 0:
return &v.state
case 1:
@@ -5912,7 +6078,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FindLockOwnerRequest); i {
+ switch v := v.(*UnlockRequest); i {
case 0:
return &v.state
case 1:
@@ -5924,7 +6090,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FindLockOwnerResponse); i {
+ switch v := v.(*UnlockResponse); i {
case 0:
return &v.state
case 1:
@@ -5936,7 +6102,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Lock); i {
+ switch v := v.(*FindLockOwnerRequest); i {
case 0:
return &v.state
case 1:
@@ -5948,7 +6114,7 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TransferLocksRequest); i {
+ switch v := v.(*FindLockOwnerResponse); i {
case 0:
return &v.state
case 1:
@@ -5960,6 +6126,30 @@ func file_filer_proto_init() {
}
}
file_filer_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Lock); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransferLocksRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_filer_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TransferLocksResponse); i {
case 0:
return &v.state
@@ -5971,7 +6161,7 @@ func file_filer_proto_init() {
return nil
}
}
- file_filer_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
+ file_filer_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LocateBrokerResponse_Resource); i {
case 0:
return &v.state
@@ -5983,7 +6173,7 @@ func file_filer_proto_init() {
return nil
}
}
- file_filer_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
+ file_filer_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FilerConf_PathConf); i {
case 0:
return &v.state
@@ -6002,7 +6192,7 @@ func file_filer_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_filer_proto_rawDesc,
NumEnums: 0,
- NumMessages: 68,
+ NumMessages: 70,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go
index cb9367a7d..046920940 100644
--- a/weed/pb/filer_pb/filer_client_bfs.go
+++ b/weed/pb/filer_pb/filer_client_bfs.go
@@ -1,7 +1,10 @@
package filer_pb
import (
+ "context"
"fmt"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "io"
"sync"
"time"
@@ -12,7 +15,7 @@ func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(pare
K := 5
var jobQueueWg sync.WaitGroup
- queue := util.NewQueue()
+ queue := util.NewQueue[util.FullPath]()
jobQueueWg.Add(1)
queue.Enqueue(parentPath)
terminates := make([]chan bool, K)
@@ -26,11 +29,11 @@ func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(pare
return
default:
t := queue.Dequeue()
- if t == nil {
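+				// with the generic queue, the zero value "" signals an empty queue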
+ if t == "" {
time.Sleep(329 * time.Millisecond)
continue
}
- dir := t.(util.FullPath)
+ dir := t
processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn)
if processErr != nil {
err = processErr
@@ -47,7 +50,7 @@ func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(pare
return
}
-func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue[util.FullPath], jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error {
@@ -65,3 +68,28 @@ func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queu
})
}
+
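+// StreamBfs streams the metadata of all entries under dir from the filer via
+// the server-side streaming TraverseBfsMetadata RPC, invoking fn for each
+// directory/entry pair received. Note that olderThanTsNs is currently only
+// used in the log message below; the request sent to the filer carries just
+// the directory.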
+func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64, fn func(parentPath util.FullPath, entry *Entry) error) (err error) {
+	glog.V(0).Infof("TraverseBfsMetadata %v for entries before %v", dir, time.Unix(0, olderThanTsNs))
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := client.TraverseBfsMetadata(ctx, &TraverseBfsMetadataRequest{
+ Directory: string(dir),
+ })
+ if err != nil {
+ return fmt.Errorf("traverse bfs metadata: %v", err)
+ }
+ for {
+ resp, err := stream.Recv()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return fmt.Errorf("traverse bfs metadata: %v", err)
+ }
+ if err := fn(util.FullPath(resp.Directory), resp.Entry); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/weed/pb/filer_pb/filer_grpc.pb.go b/weed/pb/filer_pb/filer_grpc.pb.go
index ae1564f43..87b8de33d 100644
--- a/weed/pb/filer_pb/filer_grpc.pb.go
+++ b/weed/pb/filer_pb/filer_grpc.pb.go
@@ -34,6 +34,7 @@ const (
SeaweedFiler_Statistics_FullMethodName = "/filer_pb.SeaweedFiler/Statistics"
SeaweedFiler_Ping_FullMethodName = "/filer_pb.SeaweedFiler/Ping"
SeaweedFiler_GetFilerConfiguration_FullMethodName = "/filer_pb.SeaweedFiler/GetFilerConfiguration"
+ SeaweedFiler_TraverseBfsMetadata_FullMethodName = "/filer_pb.SeaweedFiler/TraverseBfsMetadata"
SeaweedFiler_SubscribeMetadata_FullMethodName = "/filer_pb.SeaweedFiler/SubscribeMetadata"
SeaweedFiler_SubscribeLocalMetadata_FullMethodName = "/filer_pb.SeaweedFiler/SubscribeLocalMetadata"
SeaweedFiler_KvGet_FullMethodName = "/filer_pb.SeaweedFiler/KvGet"
@@ -64,6 +65,7 @@ type SeaweedFilerClient interface {
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error)
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
+ TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_TraverseBfsMetadataClient, error)
SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error)
KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error)
@@ -265,8 +267,40 @@ func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetF
return out, nil
}
+func (c *seaweedFilerClient) TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_TraverseBfsMetadataClient, error) {
+ stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[2], SeaweedFiler_TraverseBfsMetadata_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerTraverseBfsMetadataClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_TraverseBfsMetadataClient interface {
+ Recv() (*TraverseBfsMetadataResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerTraverseBfsMetadataClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerTraverseBfsMetadataClient) Recv() (*TraverseBfsMetadataResponse, error) {
+ m := new(TraverseBfsMetadataResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
- stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[2], SeaweedFiler_SubscribeMetadata_FullMethodName, opts...)
+ stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[3], SeaweedFiler_SubscribeMetadata_FullMethodName, opts...)
if err != nil {
return nil, err
}
@@ -298,7 +332,7 @@ func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse
}
func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) {
- stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[3], SeaweedFiler_SubscribeLocalMetadata_FullMethodName, opts...)
+ stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[4], SeaweedFiler_SubscribeLocalMetadata_FullMethodName, opts...)
if err != nil {
return nil, err
}
@@ -411,6 +445,7 @@ type SeaweedFilerServer interface {
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
Ping(context.Context, *PingRequest) (*PingResponse, error)
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
+ TraverseBfsMetadata(*TraverseBfsMetadataRequest, SeaweedFiler_TraverseBfsMetadataServer) error
SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error
KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error)
@@ -473,6 +508,9 @@ func (UnimplementedSeaweedFilerServer) Ping(context.Context, *PingRequest) (*Pin
func (UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented")
}
+func (UnimplementedSeaweedFilerServer) TraverseBfsMetadata(*TraverseBfsMetadataRequest, SeaweedFiler_TraverseBfsMetadataServer) error {
+ return status.Errorf(codes.Unimplemented, "method TraverseBfsMetadata not implemented")
+}
func (UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error {
return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented")
}
@@ -789,6 +827,27 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_TraverseBfsMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(TraverseBfsMetadataRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SeaweedFilerServer).TraverseBfsMetadata(m, &seaweedFilerTraverseBfsMetadataServer{stream})
+}
+
+type SeaweedFiler_TraverseBfsMetadataServer interface {
+ Send(*TraverseBfsMetadataResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerTraverseBfsMetadataServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerTraverseBfsMetadataServer) Send(m *TraverseBfsMetadataResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SubscribeMetadataRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -1057,6 +1116,11 @@ var SeaweedFiler_ServiceDesc = grpc.ServiceDesc{
ServerStreams: true,
},
{
+ StreamName: "TraverseBfsMetadata",
+ Handler: _SeaweedFiler_TraverseBfsMetadata_Handler,
+ ServerStreams: true,
+ },
+ {
StreamName: "SubscribeMetadata",
Handler: _SeaweedFiler_SubscribeMetadata_Handler,
ServerStreams: true,
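
Editor's note: the new server-streaming RPC is registered ahead of the two subscribe streams, which is why their ServiceDesc stream indices shift from 2/3 to 3/4 above. A minimal client-side sketch of consuming the stream, assuming the customary filer gRPC port and leaving the request fields (defined in filer.proto, not shown in this diff) empty:

package main

import (
	"context"
	"io"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// 18888 is the customary filer gRPC port (HTTP port + 10000); adjust as needed.
	conn, err := grpc.Dial("localhost:18888", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	// Request fields (e.g. the starting directory) are omitted here as a placeholder.
	stream, err := client.TraverseBfsMetadata(context.Background(), &filer_pb.TraverseBfsMetadataRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break // the server has finished the traversal
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("entry: %v", resp)
	}
}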
diff --git a/weed/pb/filer_pb_tail.go b/weed/pb/filer_pb_tail.go
index b54dad871..b7cca7585 100644
--- a/weed/pb/filer_pb_tail.go
+++ b/weed/pb/filer_pb_tail.go
@@ -17,6 +17,7 @@ const (
TrivialOnError EventErrorType = iota
FatalOnError
RetryForeverOnError
+ DontLogError
)
// MetadataFollowOption is used to control the behavior of the metadata following
@@ -96,6 +97,8 @@ func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn Proc
glog.Errorf("process %v: %v", resp, err)
return true
})
+ case DontLogError:
+ // intentionally do nothing: neither log nor retry
default:
glog.Errorf("process %v: %v", resp, err)
}
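
Editor's note: DontLogError adds a fourth error-handling mode for metadata subscribers: per-event processing errors are neither logged nor retried, which suits callers that do their own reporting. A sketch of selecting it, assuming MetadataFollowOption carries the mode in a field named EventErrorType (the field name itself is not visible in this hunk):

// Hypothetical wiring; only the EventErrorType constants appear in this diff.
option := &pb.MetadataFollowOption{
	EventErrorType: pb.DontLogError, // per-event errors are silently dropped
}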
diff --git a/weed/remote_storage/traverse_bfs.go b/weed/remote_storage/traverse_bfs.go
deleted file mode 100644
index 983555f6c..000000000
--- a/weed/remote_storage/traverse_bfs.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package remote_storage
-
-import (
- "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
- "github.com/seaweedfs/seaweedfs/weed/util"
- "sync"
- "time"
-)
-
-type ListDirectoryFunc func(parentDir util.FullPath, visitFn VisitFunc) error
-
-func TraverseBfs(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc) (err error) {
- K := 5
-
- var dirQueueWg sync.WaitGroup
- dirQueue := util.NewQueue()
- dirQueueWg.Add(1)
- dirQueue.Enqueue(parentPath)
- var isTerminating bool
-
- for i := 0; i < K; i++ {
- go func() {
- for {
- if isTerminating {
- break
- }
- t := dirQueue.Dequeue()
- if t == nil {
- time.Sleep(329 * time.Millisecond)
- continue
- }
- dir := t.(util.FullPath)
- processErr := processOneDirectory(listDirFn, dir, visitFn, dirQueue, &dirQueueWg)
- if processErr != nil {
- err = processErr
- }
- dirQueueWg.Done()
- }
- }()
- }
-
- dirQueueWg.Wait()
- isTerminating = true
- return
-
-}
-
-func processOneDirectory(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc, dirQueue *util.Queue, dirQueueWg *sync.WaitGroup) error {
-
- return listDirFn(parentPath, func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error {
- if err := visitFn(dir, name, isDirectory, remoteEntry); err != nil {
- return err
- }
- if !isDirectory {
- return nil
- }
- dirQueueWg.Add(1)
- dirQueue.Enqueue(parentPath.Child(name))
- return nil
- })
-
-}
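
Editor's note: the removed walker ran K goroutines polling a shared queue, with unsynchronized access to err and isTerminating; presumably the traversal now happens filer-side via the TraverseBfsMetadata stream introduced above, though the replacement call sites are not shown in this excerpt.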
diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go
index 9682ca623..4a77fd04a 100644
--- a/weed/replication/repl_util/replication_util.go
+++ b/weed/replication/repl_util/replication_util.go
@@ -4,7 +4,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/replication/source"
- "github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerSource *source.FilerSource, writeFunc func(data []byte) error) error {
@@ -21,7 +21,7 @@ func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerS
var shouldRetry bool
for _, fileUrl := range fileUrls {
- shouldRetry, err = util.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) {
+ shouldRetry, err = util_http.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) {
writeErr = writeFunc(data)
})
if err != nil {
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 9bbd7b8eb..890e68fd4 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -5,8 +5,10 @@ import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/replication/repl_util"
+ "net/http"
"net/url"
"strings"
+ "time"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/seaweedfs/seaweedfs/weed/filer"
@@ -109,7 +111,16 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
// Azure Storage account's container.
appendBlobURL := g.containerURL.NewAppendBlobURL(key)
- _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.BlobTagsMap{}, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
+ accessCondition := azblob.BlobAccessConditions{}
+ if entry.Attributes != nil && entry.Attributes.Mtime > 0 {
+ accessCondition.ModifiedAccessConditions.IfUnmodifiedSince = time.Unix(entry.Attributes.Mtime, 0)
+ }
+
+ res, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, accessCondition, azblob.BlobTagsMap{}, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
+ if res != nil && res.StatusCode() == http.StatusPreconditionFailed {
+ glog.V(0).Infof("skip overwriting %s/%s: %v", g.container, key, err)
+ return nil
+ }
if err != nil {
return err
}
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index 63e1226b6..4bcbc7898 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -14,6 +14,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) {
@@ -88,9 +89,15 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
if err != nil {
return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
}
- defer util.CloseResponse(resp)
+ defer util_http.CloseResponse(resp)
- fileId, uploadResult, err, _ := operation.UploadWithRetry(
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ glog.V(0).Infof("create uploader for %v: %v", sourceChunk.GetFileIdString(), err)
+ return "", fmt.Errorf("create uploader: %v", err)
+ }
+
+ fileId, uploadResult, err, _ := uploader.UploadWithRetry(
fs,
&filer_pb.AssignVolumeRequest{
Count: 1,
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index ce2de41b9..49f6877a0 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -120,6 +120,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
glog.V(3).Infof("already replicated %s", key)
return nil
}
+ if resp.Entry.Attributes != nil && resp.Entry.Attributes.Mtime >= entry.Attributes.Mtime {
+ glog.V(3).Infof("skip overwriting %s", key)
+ return nil
+ }
}
replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key)
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index 167907a5a..768e251a4 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -15,6 +15,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
type ReplicationSource interface {
@@ -106,7 +107,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) {
if fs.proxyByFiler {
- return util.DownloadFile("http://"+fs.address+"/?proxyChunkId="+fileId, "")
+ return util_http.DownloadFile("http://"+fs.address+"/?proxyChunkId="+fileId, "")
}
fileUrls, err := fs.LookupFileId(fileId)
@@ -115,7 +116,7 @@ func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Hea
}
for _, fileUrl := range fileUrls {
- filename, header, resp, err = util.DownloadFile(fileUrl, "")
+ filename, header, resp, err = util_http.DownloadFile(fileUrl, "")
if err != nil {
glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
} else {
diff --git a/weed/s3api/AmazonS3.xsd b/weed/s3api/AmazonS3.xsd
index 8016a6a83..8a0136b44 100644
--- a/weed/s3api/AmazonS3.xsd
+++ b/weed/s3api/AmazonS3.xsd
@@ -525,6 +525,7 @@
<xsd:element name="IsTruncated" type="xsd:boolean"/>
<xsd:element name="Contents" type="tns:ListEntry" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="CommonPrefixes" type="tns:PrefixEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="EncodingType" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
diff --git a/weed/s3api/README.txt b/weed/s3api/README.txt
index 10a18ff4d..f7eb1988a 100644
--- a/weed/s3api/README.txt
+++ b/weed/s3api/README.txt
@@ -1,7 +1,7 @@
see https://blog.aqwari.net/xml-schema-go/
1. go get aqwari.net/xml/cmd/xsdgen
-2. xsdgen -o s3api_xsd_generated.go -pkg s3api AmazonS3.xsd
-
-
-
+2. Add the EncodingType element to ListBucketResult in AmazonS3.xsd
+3. xsdgen -o s3api_xsd_generated.go -pkg s3api AmazonS3.xsd
+4. Remove the empty Grantee struct from s3api_xsd_generated.go
+5. Remove the xmlns: sed -i 's/http:\/\/s3.amazonaws.com\/doc\/2006-03-01\/ //' s3api_xsd_generated.go
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
index 6ff67b5bf..bb67c35c2 100644
--- a/weed/s3api/auto_signature_v4_test.go
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -43,7 +43,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) {
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
- inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+ inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
@@ -85,9 +85,9 @@ func TestIsReqAuthenticated(t *testing.T) {
s3Error s3err.ErrorCode
}{
// When request is unsigned, access denied is returned.
- {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied},
+ {mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied},
// When request is properly signed, error is none.
- {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone},
+ {mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone},
}
// Validates all testcases.
@@ -117,8 +117,8 @@ func TestCheckaAnonymousRequestAuthType(t *testing.T) {
ErrCode s3err.ErrorCode
Action Action
}{
- {Request: mustNewRequest("GET", "http://127.0.0.1:9000/bucket", 0, nil, t), ErrCode: s3err.ErrNone, Action: s3_constants.ACTION_READ},
- {Request: mustNewRequest("PUT", "http://127.0.0.1:9000/bucket", 0, nil, t), ErrCode: s3err.ErrAccessDenied, Action: s3_constants.ACTION_WRITE},
+ {Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000/bucket", 0, nil, t), ErrCode: s3err.ErrNone, Action: s3_constants.ACTION_READ},
+ {Request: mustNewRequest(http.MethodPut, "http://127.0.0.1:9000/bucket", 0, nil, t), ErrCode: s3err.ErrAccessDenied, Action: s3_constants.ACTION_WRITE},
}
for i, testCase := range testCases {
_, s3Error := iam.authRequest(testCase.Request, testCase.Action)
@@ -155,9 +155,9 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
Request *http.Request
ErrCode s3err.ErrorCode
}{
- {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied},
- {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
- {Request: mustNewPresignedRequest(iam, "GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+ {Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied},
+ {Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
+ {Request: mustNewPresignedRequest(iam, http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
}
for i, testCase := range testCases {
if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {
@@ -214,7 +214,7 @@ func mustNewPresignedRequest(iam *IdentityAccessManagement, method string, urlSt
// Returns new HTTP request object.
func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
if method == "" {
- method = "POST"
+ method = http.MethodPost
}
// Save for subsequent use
diff --git a/weed/s3api/s3api_acl_helper.go b/weed/s3api/s3api_acl_helper.go
index 0332b6a39..b9fb1131e 100644
--- a/weed/s3api/s3api_acl_helper.go
+++ b/weed/s3api/s3api_acl_helper.go
@@ -9,9 +9,9 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
- "github.com/seaweedfs/seaweedfs/weed/util"
"net/http"
"strings"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
type AccountManager interface {
@@ -32,7 +32,7 @@ func GetAccountId(r *http.Request) string {
// ExtractAcl extracts the acl from the request body, or from the header if request body is empty
func ExtractAcl(r *http.Request, accountManager AccountManager, ownership, bucketOwnerId, ownerId, accountId string) (grants []*s3.Grant, errCode s3err.ErrorCode) {
if r.Body != nil && r.Body != http.NoBody {
- defer util.CloseRequest(r)
+ defer util_http.CloseRequest(r)
var acp s3.AccessControlPolicy
err := xmlutil.UnmarshalXML(&acp, xml.NewDecoder(r.Body), "")
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 12d2c0432..7d0d76ea4 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -13,7 +13,6 @@ import (
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket"
- "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
@@ -26,14 +25,9 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
-type ListAllMyBucketsResult struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"`
- Owner *s3.Owner
- Buckets []*s3.Bucket `xml:"Buckets>Bucket"`
-}
-
func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
glog.V(3).Infof("ListBucketsHandler")
@@ -59,25 +53,25 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
identityId := r.Header.Get(s3_constants.AmzIdentityId)
- var buckets []*s3.Bucket
+ var listBuckets ListAllMyBucketsList
for _, entry := range entries {
if entry.IsDirectory {
if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, entry.Name, "") {
continue
}
- buckets = append(buckets, &s3.Bucket{
- Name: aws.String(entry.Name),
- CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()),
+ listBuckets.Bucket = append(listBuckets.Bucket, ListAllMyBucketsEntry{
+ Name: entry.Name,
+ CreationDate: time.Unix(entry.Attributes.Crtime, 0).UTC(),
})
}
}
response = ListAllMyBucketsResult{
- Owner: &s3.Owner{
- ID: aws.String(identityId),
- DisplayName: aws.String(identityId),
+ Owner: CanonicalUser{
+ ID: identityId,
+ DisplayName: identityId,
},
- Buckets: buckets,
+ Buckets: listBuckets,
}
writeSuccessResponseXML(w, r, response)
@@ -461,7 +455,11 @@ func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *h
for prefix, ttl := range collectionTtls {
bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
if strings.HasPrefix(prefix, bucketPrefix) && strings.HasSuffix(ttl, "d") {
- fc.DeleteLocationConf(prefix)
+ pathConf, found := fc.GetLocationConf(prefix)
+ if found {
+ pathConf.Ttl = ""
+ fc.SetLocationConf(pathConf)
+ }
changed = true
}
}
@@ -487,7 +485,7 @@ func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *h
// GetBucketLocationHandler Get bucket location
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLocation.html
func (s3a *S3ApiServer) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
- writeSuccessResponseXML(w, r, LocationConstraint{})
+ writeSuccessResponseXML(w, r, CreateBucketConfiguration{})
}
// GetBucketRequestPaymentHandler Get bucket request payment
@@ -513,7 +511,7 @@ func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *htt
}
var v s3.OwnershipControls
- defer util.CloseRequest(r)
+ defer util_http.CloseRequest(r)
err := xmlutil.UnmarshalXML(&v, xml.NewDecoder(r.Body), "")
if err != nil {
diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go
index 1cff45aa0..2c8a3ae2c 100644
--- a/weed/s3api/s3api_bucket_handlers_test.go
+++ b/weed/s3api/s3api_bucket_handlers_test.go
@@ -4,37 +4,34 @@ import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"testing"
"time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
)
func TestListBucketsHandler(t *testing.T) {
expected := `<?xml version="1.0" encoding="UTF-8"?>
-<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><DisplayName></DisplayName><ID></ID></Owner><Buckets><Bucket><CreationDate>2011-04-09T12:34:49Z</CreationDate><Name>test1</Name></Bucket><Bucket><CreationDate>2011-02-09T12:34:49Z</CreationDate><Name>test2</Name></Bucket></Buckets></ListAllMyBucketsResult>`
+<ListAllMyBucketsResult><Owner><ID></ID></Owner><Buckets><Bucket><Name>test1</Name><CreationDate>2011-04-09T12:34:49Z</CreationDate></Bucket><Bucket><Name>test2</Name><CreationDate>2011-02-09T12:34:49Z</CreationDate></Bucket></Buckets></ListAllMyBucketsResult>`
var response ListAllMyBucketsResult
- var buckets []*s3.Bucket
- buckets = append(buckets, &s3.Bucket{
- Name: aws.String("test1"),
- CreationDate: aws.Time(time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC)),
+ var bucketsList ListAllMyBucketsList
+ bucketsList.Bucket = append(bucketsList.Bucket, ListAllMyBucketsEntry{
+ Name: "test1",
+ CreationDate: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC),
})
- buckets = append(buckets, &s3.Bucket{
- Name: aws.String("test2"),
- CreationDate: aws.Time(time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC)),
+ bucketsList.Bucket = append(bucketsList.Bucket, ListAllMyBucketsEntry{
+ Name: "test2",
+ CreationDate: time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC),
})
response = ListAllMyBucketsResult{
- Owner: &s3.Owner{
- ID: aws.String(""),
- DisplayName: aws.String(""),
+ Owner: CanonicalUser{
+ ID: "",
+ DisplayName: "",
},
- Buckets: buckets,
+ Buckets: bucketsList,
}
encoded := string(s3err.EncodeXMLResponse(response))
if encoded != expected {
- t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected)
+ t.Errorf("unexpected output:%s\nexpecting:%s", encoded, expected)
}
}
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 1d58af8bc..3ab72285f 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -3,6 +3,8 @@ package s3api
import (
"bytes"
"fmt"
+ "github.com/seaweedfs/seaweedfs/weed/filer"
+ "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"io"
"net/http"
"net/url"
@@ -14,7 +16,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util/mem"
"github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser {
@@ -35,10 +37,17 @@ func urlEscapeObject(object string) string {
return "/" + t
}
+func entryUrlEncode(dir string, entry string, encodingTypeUrl bool) (dirName string, entryName string, prefix string) {
+ if !encodingTypeUrl {
+ return dir, entry, entry
+ }
+ return urlPathEscape(dir), url.QueryEscape(entry), urlPathEscape(entry)
+}
+
func urlPathEscape(object string) string {
var escapedParts []string
for _, part := range strings.Split(object, "/") {
- escapedParts = append(escapedParts, url.PathEscape(part))
+ escapedParts = append(escapedParts, strings.ReplaceAll(url.PathEscape(part), "+", "%2B"))
}
return strings.Join(escapedParts, "/")
}
@@ -63,6 +72,37 @@ func removeDuplicateSlashes(object string) string {
return result.String()
}
+func newListEntry(entry *filer_pb.Entry, key string, dir string, name string, bucketPrefix string, fetchOwner bool, isDirectory bool, encodingTypeUrl bool) (listEntry ListEntry) {
+ storageClass := "STANDARD"
+ if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
+ storageClass = string(v)
+ }
+ keyFormat := "%s/%s"
+ if isDirectory {
+ keyFormat += "/"
+ }
+ if key == "" {
+ key = fmt.Sprintf(keyFormat, dir, name)[len(bucketPrefix):]
+ }
+ if encodingTypeUrl {
+ key = urlPathEscape(key)
+ }
+ listEntry = ListEntry{
+ Key: key,
+ LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+ ETag: "\"" + filer.ETag(entry) + "\"",
+ Size: int64(filer.FileSize(entry)),
+ StorageClass: StorageClass(storageClass),
+ }
+ if fetchOwner {
+ listEntry.Owner = CanonicalUser{
+ ID: fmt.Sprintf("%x", entry.Attributes.Uid),
+ DisplayName: entry.Attributes.UserName,
+ }
+ }
+ return listEntry
+}
+
func (s3a *S3ApiServer) toFilerUrl(bucket, object string) string {
object = urlPathEscape(removeDuplicateSlashes(object))
destUrl := fmt.Sprintf("http://%s%s/%s%s",
@@ -131,7 +171,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
return
}
- defer util.CloseResponse(resp)
+ defer util_http.CloseResponse(resp)
if resp.StatusCode == http.StatusPreconditionFailed {
s3err.WriteErrorResponse(w, r, s3err.ErrPreconditionFailed)
@@ -143,7 +183,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
return
}
- if r.Method == "DELETE" {
+ if r.Method == http.MethodDelete {
if resp.StatusCode == http.StatusNotFound {
// this is normal
responseStatusCode := responseFn(resp, w)
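
Editor's note: url.PathEscape follows RFC 3986 path rules and leaves '+' intact, but clients that decode listing keys as query-style strings treat '+' as a space, hence the extra ReplaceAll in urlPathEscape above. A standard-library-only check of the difference:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	key := "a+b c.txt"
	fmt.Println(url.PathEscape(key))                                 // a+b%20c.txt — '+' survives
	fmt.Println(strings.ReplaceAll(url.PathEscape(key), "+", "%2B")) // a%2Bb%20c.txt — unambiguous either way
}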
diff --git a/weed/s3api/s3api_object_handlers_copy.go b/weed/s3api/s3api_object_handlers_copy.go
index 8d13fe17e..4ca8010d2 100644
--- a/weed/s3api/s3api_object_handlers_copy.go
+++ b/weed/s3api/s3api_object_handlers_copy.go
@@ -14,6 +14,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
const (
@@ -87,12 +88,12 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
srcUrl := fmt.Sprintf("http://%s%s/%s%s",
s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))
- _, _, resp, err := util.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
+ _, _, resp, err := util_http.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
return
}
- defer util.CloseResponse(resp)
+ defer util_http.CloseResponse(resp)
tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name)
if tagErr != nil {
@@ -175,12 +176,12 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
srcUrl := fmt.Sprintf("http://%s%s/%s%s",
s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))
- resp, dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
+ resp, dataReader, err := util_http.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
return
}
- defer util.CloseResponse(resp)
+ defer util_http.CloseResponse(resp)
defer dataReader.Close()
glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
diff --git a/weed/s3api/s3api_object_handlers_delete.go b/weed/s3api/s3api_object_handlers_delete.go
index 580578593..7656b9d38 100644
--- a/weed/s3api/s3api_object_handlers_delete.go
+++ b/weed/s3api/s3api_object_handlers_delete.go
@@ -27,14 +27,13 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
bucket, object := s3_constants.GetBucketAndObject(r)
glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)
- object = urlPathEscape(removeDuplicateSlashes(object))
+ target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
+ dir, name := target.DirAndName()
- s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+ err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
- err := doDeleteEntry(client, s3a.option.BucketsPath+"/"+bucket, object, true, false)
- if err != nil {
- // skip deletion error, usually the file is not found
- return nil
+ if err := doDeleteEntry(client, dir, name, true, false); err != nil {
+ return err
}
if s3a.option.AllowEmptyFolder {
@@ -42,11 +41,8 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
}
directoriesWithDeletion := make(map[string]int)
- lastSeparator := strings.LastIndex(object, "/")
- if lastSeparator > 0 {
- parentDirectoryPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object[:lastSeparator])
- directoriesWithDeletion[parentDirectoryPath]++
-
+ if strings.LastIndex(object, "/") > 0 {
+ directoriesWithDeletion[dir]++
// purge empty folders, only checking folders with deletions
for len(directoriesWithDeletion) > 0 {
directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
@@ -55,6 +51,10 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
return nil
})
+ if err != nil {
+ s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
+ return
+ }
w.WriteHeader(http.StatusNoContent)
}
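
Editor's note: the rewritten handler derives dir and name once via util.FullPath and, unlike before, surfaces deletion failures as a 500 instead of silently answering 204. For reference, DirAndName splits at the last path separator (a small sketch; FullPath comes from weed/util):

target := util.FullPath("/buckets/mybucket/photos/cat.jpg")
dir, name := target.DirAndName() // dir = "/buckets/mybucket/photos", name = "cat.jpg"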
diff --git a/weed/s3api/s3api_object_handlers_list.go b/weed/s3api/s3api_object_handlers_list.go
index 38e7f6fef..27d18800c 100644
--- a/weed/s3api/s3api_object_handlers_list.go
+++ b/weed/s3api/s3api_object_handlers_list.go
@@ -4,33 +4,44 @@ import (
"context"
"encoding/xml"
"fmt"
+ "github.com/aws/aws-sdk-go/service/s3"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
+ "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
"io"
"net/http"
"net/url"
"strconv"
"strings"
- "time"
-
- "github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)
+type OptionalString struct {
+ string
+ set bool
+}
+
+func (o OptionalString) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+ if !o.set {
+ return nil
+ }
+ return e.EncodeElement(o.string, startElement)
+}
+
type ListBucketResultV2 struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
- Name string `xml:"Name"`
- Prefix string `xml:"Prefix"`
- MaxKeys int `xml:"MaxKeys"`
- Delimiter string `xml:"Delimiter,omitempty"`
- IsTruncated bool `xml:"IsTruncated"`
- Contents []ListEntry `xml:"Contents,omitempty"`
- CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
- ContinuationToken string `xml:"ContinuationToken,omitempty"`
- NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
- KeyCount int `xml:"KeyCount"`
- StartAfter string `xml:"StartAfter,omitempty"`
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ MaxKeys uint16 `xml:"MaxKeys"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ IsTruncated bool `xml:"IsTruncated"`
+ Contents []ListEntry `xml:"Contents,omitempty"`
+ CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
+ ContinuationToken OptionalString `xml:"ContinuationToken,omitempty"`
+ NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
+ EncodingType string `xml:"EncodingType,omitempty"`
+ KeyCount int `xml:"KeyCount"`
+ StartAfter string `xml:"StartAfter,omitempty"`
}
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
@@ -41,19 +52,19 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
bucket, _ := s3_constants.GetBucketAndObject(r)
glog.V(3).Infof("ListObjectsV2Handler %s", bucket)
- originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
+ originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys := getListObjectsV2Args(r.URL.Query())
if maxKeys < 0 {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
return
}
- marker := continuationToken
- if continuationToken == "" {
+ marker := continuationToken.string
+ if !continuationToken.set {
marker = startAfter
}
- response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter, encodingTypeUrl, fetchOwner)
if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
@@ -68,7 +79,6 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
}
responseV2 := &ListBucketResultV2{
- XMLName: response.XMLName,
Name: response.Name,
CommonPrefixes: response.CommonPrefixes,
Contents: response.Contents,
@@ -76,11 +86,14 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
Delimiter: response.Delimiter,
IsTruncated: response.IsTruncated,
KeyCount: len(response.Contents) + len(response.CommonPrefixes),
- MaxKeys: response.MaxKeys,
+ MaxKeys: uint16(response.MaxKeys),
NextContinuationToken: response.NextMarker,
Prefix: response.Prefix,
StartAfter: startAfter,
}
+ if encodingTypeUrl {
+ responseV2.EncodingType = s3.EncodingTypeUrl
+ }
writeSuccessResponseXML(w, r, responseV2)
}
@@ -93,14 +106,13 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
bucket, _ := s3_constants.GetBucketAndObject(r)
glog.V(3).Infof("ListObjectsV1Handler %s", bucket)
- originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
+ originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys := getListObjectsV1Args(r.URL.Query())
if maxKeys < 0 {
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys)
return
}
-
- response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, uint16(maxKeys), marker, delimiter, encodingTypeUrl, true)
if err != nil {
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
@@ -117,7 +129,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
writeSuccessResponseXML(w, r, response)
}
-func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, originalMarker string, delimiter string) (response ListBucketResult, err error) {
+func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys uint16, originalMarker string, delimiter string, encodingTypeUrl bool, fetchOwner bool) (response ListBucketResult, err error) {
// convert full path prefix into directory name and prefix for entry name
requestDir, prefix, marker := normalizePrefixMarker(originalPrefix, originalMarker)
bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
@@ -141,23 +153,15 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
empty := true
nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, cursor, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
empty = false
+ dirName, entryName, prefixName := entryUrlEncode(dir, entry.Name, encodingTypeUrl)
if entry.IsDirectory {
if entry.IsDirectoryKeyObject() {
- contents = append(contents, ListEntry{
- Key: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
- LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
- ETag: "\"" + filer.ETag(entry) + "\"",
- Owner: CanonicalUser{
- ID: fmt.Sprintf("%x", entry.Attributes.Uid),
- DisplayName: entry.Attributes.UserName,
- },
- StorageClass: "STANDARD",
- })
+ contents = append(contents, newListEntry(entry, "", dirName, entryName, bucketPrefix, fetchOwner, true, false))
cursor.maxKeys--
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
} else if delimiter == "/" { // A response can contain CommonPrefixes only if you specify a delimiter.
commonPrefixes = append(commonPrefixes, PrefixEntry{
- Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
+ Prefix: fmt.Sprintf("%s/%s/", dirName, prefixName)[len(bucketPrefix):],
})
//All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.
cursor.maxKeys--
@@ -195,21 +199,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
}
}
if !delimiterFound {
- storageClass := "STANDARD"
- if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
- storageClass = string(v)
- }
- contents = append(contents, ListEntry{
- Key: fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
- LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
- ETag: "\"" + filer.ETag(entry) + "\"",
- Size: int64(filer.FileSize(entry)),
- Owner: CanonicalUser{
- ID: fmt.Sprintf("%x", entry.Attributes.Uid),
- DisplayName: entry.Attributes.UserName,
- },
- StorageClass: StorageClass(storageClass),
- })
+ contents = append(contents, newListEntry(entry, "", dirName, entryName, bucketPrefix, fetchOwner, false, false))
cursor.maxKeys--
}
}
@@ -237,13 +227,17 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
Prefix: originalPrefix,
Marker: originalMarker,
NextMarker: nextMarker,
- MaxKeys: maxKeys,
+ MaxKeys: int(maxKeys),
Delimiter: delimiter,
IsTruncated: cursor.isTruncated,
Contents: contents,
CommonPrefixes: commonPrefixes,
}
-
+ if encodingTypeUrl {
+ // TODO: needed to pass test_bucket_listv2_encoding_basic
+ // sort.Slice(response.CommonPrefixes, func(i, j int) bool { return response.CommonPrefixes[i].Prefix < response.CommonPrefixes[j].Prefix })
+ response.EncodingType = s3.EncodingTypeUrl
+ }
return nil
})
@@ -251,7 +245,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
}
type ListingCursor struct {
- maxKeys int
+ maxKeys uint16
isTruncated bool
prefixEndsOnDelimiter bool
}
@@ -434,13 +428,16 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
return
}
-func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
+func getListObjectsV2Args(values url.Values) (prefix, startAfter, delimiter string, token OptionalString, encodingTypeUrl bool, fetchOwner bool, maxkeys uint16) {
prefix = values.Get("prefix")
- token = values.Get("continuation-token")
+ token = OptionalString{set: values.Has("continuation-token"), string: values.Get("continuation-token")}
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
+ encodingTypeUrl = values.Get("encoding-type") == s3.EncodingTypeUrl
if values.Get("max-keys") != "" {
- maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
+ if maxKeys, err := strconv.ParseUint(values.Get("max-keys"), 10, 16); err == nil {
+ maxkeys = uint16(maxKeys)
+ }
} else {
maxkeys = maxObjectListSizeLimit
}
@@ -448,12 +445,15 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
return
}
-func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {
+func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, encodingTypeUrl bool, maxkeys int16) {
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
+ encodingTypeUrl = values.Get("encoding-type") == "url"
if values.Get("max-keys") != "" {
- maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
+ if maxKeys, err := strconv.ParseInt(values.Get("max-keys"), 10, 16); err == nil {
+ maxkeys = int16(maxKeys)
+ }
} else {
maxkeys = maxObjectListSizeLimit
}
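
Editor's note: OptionalString exists because ListObjectsV2 must distinguish an absent continuation-token (fall back to start-after) from one that is present but empty, and must omit ContinuationToken from the response XML when it was never supplied. encoding/xml's omitempty never applies to struct fields, so the custom MarshalXML is what actually suppresses the element. A self-contained demonstration using the type as defined in this diff:

package main

import (
	"encoding/xml"
	"fmt"
)

type OptionalString struct {
	string
	set bool
}

func (o OptionalString) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if !o.set {
		return nil // unset: emit nothing at all
	}
	return e.EncodeElement(o.string, start)
}

type result struct {
	XMLName xml.Name       `xml:"Result"`
	Token   OptionalString `xml:"ContinuationToken,omitempty"`
}

func main() {
	unset, _ := xml.Marshal(result{})
	empty, _ := xml.Marshal(result{Token: OptionalString{set: true}})
	fmt.Println(string(unset)) // <Result></Result>
	fmt.Println(string(empty)) // <Result><ContinuationToken></ContinuationToken></Result>
}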
diff --git a/weed/s3api/s3api_object_handlers_list_test.go b/weed/s3api/s3api_object_handlers_list_test.go
index 6974c5567..3295c2fca 100644
--- a/weed/s3api/s3api_object_handlers_list_test.go
+++ b/weed/s3api/s3api_object_handlers_list_test.go
@@ -12,7 +12,7 @@ func TestListObjectsHandler(t *testing.T) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
expected := `<?xml version="1.0" encoding="UTF-8"?>
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Name>test_container</Name><Prefix></Prefix><Marker></Marker><MaxKeys>1000</MaxKeys><IsTruncated>false</IsTruncated><Contents><Key>1.zip</Key><ETag>&#34;4397da7a7649e8085de9916c240e8166&#34;</ETag><Size>1234567</Size><Owner><ID>65a011niqo39cdf8ec533ec3d1ccaafsa932</ID></Owner><StorageClass>STANDARD</StorageClass><LastModified>2011-04-09T12:34:49Z</LastModified></Contents></ListBucketResult>`
+<ListBucketResult><Name>test_container</Name><Prefix></Prefix><Marker></Marker><MaxKeys>1000</MaxKeys><IsTruncated>false</IsTruncated><Contents><Key>1.zip</Key><ETag>&#34;4397da7a7649e8085de9916c240e8166&#34;</ETag><Size>1234567</Size><Owner><ID>65a011niqo39cdf8ec533ec3d1ccaafsa932</ID></Owner><StorageClass>STANDARD</StorageClass><LastModified>2011-04-09T12:34:49Z</LastModified></Contents><EncodingType></EncodingType></ListBucketResult>`
response := ListBucketResult{
Name: "test_container",
diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go
index 49d385afc..0f8743a30 100644
--- a/weed/s3api/s3api_object_handlers_put.go
+++ b/weed/s3api/s3api_object_handlers_put.go
@@ -110,7 +110,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
hash := md5.New()
var body = io.TeeReader(dataReader, hash)
- proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
+ proxyReq, err := http.NewRequest(http.MethodPut, uploadUrl, body)
if err != nil {
glog.Errorf("NewRequest %s: %v", uploadUrl, err)
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index 9422318ce..e0517ffb7 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -20,6 +20,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
+ util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)
type S3ApiServerOption struct {
@@ -44,7 +46,7 @@ type S3ApiServer struct {
cb *CircuitBreaker
randomClientId int32
filerGuard *security.Guard
- client *http.Client
+ client util_http_client.HTTPClientInterface
bucketRegistry *BucketRegistry
}
@@ -84,10 +86,9 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer
}
s3ApiServer.bucketRegistry = NewBucketRegistry(s3ApiServer)
if option.LocalFilerSocket == "" {
- s3ApiServer.client = &http.Client{Transport: &http.Transport{
- MaxIdleConns: 1024,
- MaxIdleConnsPerHost: 1024,
- }}
+ if s3ApiServer.client, err = util_http.NewGlobalHttpClient(); err != nil {
+ return nil, err
+ }
} else {
s3ApiServer.client = &http.Client{
Transport: &http.Transport{
@@ -109,9 +110,9 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
apiRouter := router.PathPrefix("/").Subrouter()
// Readiness Probe
- apiRouter.Methods("GET").Path("/status").HandlerFunc(s3a.StatusHandler)
+ apiRouter.Methods(http.MethodGet).Path("/status").HandlerFunc(s3a.StatusHandler)
- apiRouter.Methods("OPTIONS").HandlerFunc(
+ apiRouter.Methods(http.MethodOptions).HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get("Origin")
if origin != "" {
@@ -161,135 +162,135 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
// objects with query
// CopyObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
+ bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
// AbortMultipartUpload
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")
// GetObjectTagging
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")
+ bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")
// PutObjectTagging
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")
// DeleteObjectTagging
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")
+ bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")
// PutObjectACL
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "")
// PutObjectRetention
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
// PutObjectLegalHold
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
// PutObjectLockConfiguration
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")
// GetObjectACL
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
+ bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
// objects with query
// raw objects
// HeadObject
- bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET"))
+ bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET"))
// GetObject, but directory listing is not supported
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET"))
+ bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET"))
// CopyObject
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY"))
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY"))
// PutObject
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT"))
+ bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT"))
// DeleteObject
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE"))
+ bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE"))
// raw objects
// buckets with query
// DeleteMultipleObjects
- bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")
+ bucket.Methods(http.MethodPost).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")
// GetBucketACL
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
// PutBucketACL
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "")
// GetBucketPolicy
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
// PutBucketPolicy
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
// DeleteBucketPolicy
- bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")
// GetBucketCors
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "")
// PutBucketCors
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "")
// DeleteBucketCors
- bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "")
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "")
// GetBucketLifecycleConfiguration
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "")
// PutBucketLifecycleConfiguration
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "")
// DeleteBucketLifecycleConfiguration
- bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "")
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "")
// GetBucketLocation
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "")
// GetBucketRequestPayment
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "")
// GetBucketVersioning
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketVersioningHandler, ACTION_READ)), "GET")).Queries("versioning", "")
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketVersioningHandler, ACTION_WRITE)), "PUT")).Queries("versioning", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketVersioningHandler, ACTION_READ)), "GET")).Queries("versioning", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketVersioningHandler, ACTION_WRITE)), "PUT")).Queries("versioning", "")
// ListObjectsV2
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2")
// buckets with query
// PutBucketOwnershipControls
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketOwnershipControls, ACTION_ADMIN), "PUT")).Queries("ownershipControls", "")
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketOwnershipControls, ACTION_ADMIN), "PUT")).Queries("ownershipControls", "")
//GetBucketOwnershipControls
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketOwnershipControls, ACTION_READ), "GET")).Queries("ownershipControls", "")
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketOwnershipControls, ACTION_READ), "GET")).Queries("ownershipControls", "")
//DeleteBucketOwnershipControls
- bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketOwnershipControls, ACTION_ADMIN), "DELETE")).Queries("ownershipControls", "")
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketOwnershipControls, ACTION_ADMIN), "DELETE")).Queries("ownershipControls", "")
// raw buckets
// PostPolicy
- bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST"))
+ bucket.Methods(http.MethodPost).HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST"))
// HeadBucket
- bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET"))
+ bucket.Methods(http.MethodHead).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET"))
// PutBucket
- bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketHandler, ACTION_ADMIN)), "PUT"))
+ bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketHandler, ACTION_ADMIN)), "PUT"))
// DeleteBucket
- bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_DELETE_BUCKET)), "DELETE"))
+ bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_DELETE_BUCKET)), "DELETE"))
// ListObjectsV1 (Legacy)
- bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST"))
+ bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST"))
// raw buckets
}
// ListBuckets
- apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST"))
+ apiRouter.Methods(http.MethodGet).Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST"))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler)
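The hunk above is mechanical: every bare string like "GET" passed to Methods() becomes the corresponding typed constant from net/http, so a typo fails to compile instead of silently never matching. A minimal sketch of the pattern, assuming gorilla/mux (the source of the .Methods().Queries() chain used here); the route and handler are illustrative stand-ins, not SeaweedFS code:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	bucket := r.PathPrefix("/{bucket}").Subrouter()

	// http.MethodGet is just the string "GET", but misspelling the
	// constant name is a compile error, unlike Methods("GETT").
	bucket.Methods(http.MethodGet).
		Queries("versioning", ""). // matches ?versioning, like the routes above
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintln(w, "GetBucketVersioning for", mux.Vars(req)["bucket"])
		})

	log.Fatal(http.ListenAndServe(":8080", r))
}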
diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go
index dd6a32ff2..f883287d5 100644
--- a/weed/s3api/s3api_xsd_generated.go
+++ b/weed/s3api/s3api_xsd_generated.go
@@ -1,3 +1,5 @@
+// Code generated by xsdgen. DO NOT EDIT.
+
package s3api
import (
@@ -17,11 +19,546 @@ type AccessControlPolicy struct {
}
type AmazonCustomerByEmail struct {
- EmailAddress string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ EmailAddress"`
+ EmailAddress string `xml:"EmailAddress"`
+}
+
+type Anon1 struct {
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
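+// Generated overlay pattern (repeated for every AnonN type below): the
+// local alias `type T Anon1` strips Anon1's methods so encoding the
+// overlay cannot recurse back into MarshalXML, and the shadowing
+// *xsdDateTime field points at the same memory as the embedded Timestamp,
+// swapping in xsd:dateTime formatting on the wire without copying data.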
+func (t *Anon1) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon1
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon1) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon1
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon10 struct {
+}
+
+type Anon11 struct {
+ Bucket string `xml:"Bucket"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon11) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon11
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon11) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon11
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon12 struct {
+}
+
+type Anon13 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ GetMetadata bool `xml:"GetMetadata"`
+ GetData bool `xml:"GetData"`
+ InlineData bool `xml:"InlineData"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon13) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon13
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon13) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon13
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon14 struct {
+ GetObjectResponse GetObjectResult `xml:"GetObjectResponse"`
+}
+
+type Anon15 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ GetMetadata bool `xml:"GetMetadata"`
+ GetData bool `xml:"GetData"`
+ InlineData bool `xml:"InlineData"`
+ ByteRangeStart int64 `xml:"ByteRangeStart,omitempty"`
+ ByteRangeEnd int64 `xml:"ByteRangeEnd,omitempty"`
+ IfModifiedSince time.Time `xml:"IfModifiedSince,omitempty"`
+ IfUnmodifiedSince time.Time `xml:"IfUnmodifiedSince,omitempty"`
+ IfMatch []string `xml:"IfMatch,omitempty"`
+ IfNoneMatch []string `xml:"IfNoneMatch,omitempty"`
+ ReturnCompleteObjectOnConditionFailure bool `xml:"ReturnCompleteObjectOnConditionFailure,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon15) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon15
+ var layout struct {
+ *T
+ IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.IfModifiedSince = (*xsdDateTime)(&layout.T.IfModifiedSince)
+ layout.IfUnmodifiedSince = (*xsdDateTime)(&layout.T.IfUnmodifiedSince)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon15) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon15
+ var overlay struct {
+ *T
+ IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.IfModifiedSince = (*xsdDateTime)(&overlay.T.IfModifiedSince)
+ overlay.IfUnmodifiedSince = (*xsdDateTime)(&overlay.T.IfUnmodifiedSince)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon16 struct {
+ GetObjectResponse GetObjectResult `xml:"GetObjectResponse"`
+}
+
+type Anon17 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ ContentLength int64 `xml:"ContentLength"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon17) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon17
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon17) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon17
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon18 struct {
+ PutObjectResponse PutObjectResult `xml:"PutObjectResponse"`
+}
+
+type Anon19 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ Data []byte `xml:"Data"`
+ ContentLength int64 `xml:"ContentLength"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
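+// Anon19 carries an inline object body, so its overlay additionally routes
+// Data through *xsdBase64Binary to base64-encode the payload, alongside the
+// usual *xsdDateTime substitution for Timestamp.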
+func (t *Anon19) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon19
+ var layout struct {
+ *T
+ Data *xsdBase64Binary `xml:"Data"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Data = (*xsdBase64Binary)(&layout.T.Data)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon19) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon19
+ var overlay struct {
+ *T
+ Data *xsdBase64Binary `xml:"Data"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon2 struct {
+ GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"GetBucketLoggingStatusResponse"`
+}
+
+type Anon20 struct {
+ PutObjectInlineResponse PutObjectResult `xml:"PutObjectInlineResponse"`
+}
+
+type Anon21 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon21) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon21
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon21) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon21
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon22 struct {
+ DeleteObjectResponse Status `xml:"DeleteObjectResponse"`
+}
+
+type Anon23 struct {
+ Bucket string `xml:"Bucket"`
+ Prefix string `xml:"Prefix,omitempty"`
+ Marker string `xml:"Marker,omitempty"`
+ MaxKeys int `xml:"MaxKeys,omitempty"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon23) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon23
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon23) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon23
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon24 struct {
+ ListBucketResponse ListBucketResult `xml:"ListBucketResponse"`
+}
+
+type Anon25 struct {
+ ListVersionsResponse ListVersionsResult `xml:"ListVersionsResponse"`
+}
+
+type Anon26 struct {
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+}
+
+func (t *Anon26) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon26
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon26) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon26
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon27 struct {
+ ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"ListAllMyBucketsResponse"`
+}
+
+type Anon28 struct {
+ Location string `xml:"Location"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ ETag string `xml:"ETag"`
+}
+
+type Anon29 struct {
+ SourceBucket string `xml:"SourceBucket"`
+ SourceKey string `xml:"SourceKey"`
+ DestinationBucket string `xml:"DestinationBucket"`
+ DestinationKey string `xml:"DestinationKey"`
+ MetadataDirective MetadataDirective `xml:"MetadataDirective,omitempty"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ CopySourceIfModifiedSince time.Time `xml:"CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince time.Time `xml:"CopySourceIfUnmodifiedSince,omitempty"`
+ CopySourceIfMatch []string `xml:"CopySourceIfMatch,omitempty"`
+ CopySourceIfNoneMatch []string `xml:"CopySourceIfNoneMatch,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon29) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon29
+ var layout struct {
+ *T
+ CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.CopySourceIfModifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfModifiedSince)
+ layout.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfUnmodifiedSince)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon29) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon29
+ var overlay struct {
+ *T
+ CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.CopySourceIfModifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfModifiedSince)
+ overlay.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfUnmodifiedSince)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon3 struct {
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+ BucketLoggingStatus BucketLoggingStatus `xml:"BucketLoggingStatus"`
+}
+
+func (t *Anon3) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon3
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon3) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon3
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon30 struct {
+ CopyObjectResult CopyObjectResult `xml:"CopyObjectResult"`
+}
+
+type Anon4 struct {
+}
+
+type Anon5 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon5) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon5
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon5) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon5
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon6 struct {
+ GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"GetObjectAccessControlPolicyResponse"`
+}
+
+type Anon7 struct {
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon7) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon7
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon7) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon7
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Anon8 struct {
+ GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"GetBucketAccessControlPolicyResponse"`
+}
+
+type Anon9 struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ AccessControlList AccessControlList `xml:"AccessControlList"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+}
+
+func (t *Anon9) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T Anon9
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *Anon9) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T Anon9
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
}
type BucketLoggingStatus struct {
- LoggingEnabled LoggingSettings `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LoggingEnabled,omitempty"`
+ LoggingEnabled LoggingSettings `xml:"LoggingEnabled,omitempty"`
}
type CanonicalUser struct {
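Two effects of the regeneration are worth calling out before the remaining hunks, which all follow the same shape: the `http://s3.amazonaws.com/doc/2006-03-01/` prefix is dropped from every struct tag, so elements now marshal unqualified, and time.Time fields still round-trip through the overlay trick shown above. A self-contained sketch of both, under stated assumptions — xsdDT and CopyResult are hypothetical stand-ins for the generated xsdDateTime and CopyObjectResult, not the real types:

package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"time"
)

// xsdDT mimics the generated xsdDateTime helper: same memory layout as
// time.Time, but serializes in the xsd:dateTime text format.
type xsdDT time.Time

func (t xsdDT) MarshalText() ([]byte, error) {
	return []byte(time.Time(t).Format("2006-01-02T15:04:05.999999999Z07:00")), nil
}

// CopyResult mirrors the de-namespaced tags in the diff above.
type CopyResult struct {
	LastModified time.Time `xml:"LastModified"`
	ETag         string    `xml:"ETag"`
}

func (t *CopyResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T CopyResult // alias strips methods, preventing recursion
	var layout struct {
		*T
		LastModified *xsdDT `xml:"LastModified"` // shadows the promoted field
	}
	layout.T = (*T)(t)
	layout.LastModified = (*xsdDT)(&layout.T.LastModified)
	return e.EncodeElement(layout, start)
}

func main() {
	r := CopyResult{LastModified: time.Date(2006, 3, 1, 0, 0, 0, 0, time.UTC), ETag: "etag-123"}
	out, err := xml.Marshal(&r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
	// <CopyResult><LastModified>2006-03-01T00:00:00Z</LastModified><ETag>etag-123</ETag></CopyResult>
	// With the namespace still in the tags, each element would instead be
	// qualified with http://s3.amazonaws.com/doc/2006-03-01/.
}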
@@ -30,31 +567,31 @@ type CanonicalUser struct {
}
type CopyObject struct {
- SourceBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceBucket"`
- SourceKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceKey"`
- DestinationBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationBucket"`
- DestinationKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationKey"`
- MetadataDirective MetadataDirective `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MetadataDirective,omitempty"`
- Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
- AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
- CopySourceIfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
- CopySourceIfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
- CopySourceIfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfMatch,omitempty"`
- CopySourceIfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfNoneMatch,omitempty"`
- StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ SourceBucket string `xml:"SourceBucket"`
+ SourceKey string `xml:"SourceKey"`
+ DestinationBucket string `xml:"DestinationBucket"`
+ DestinationKey string `xml:"DestinationKey"`
+ MetadataDirective MetadataDirective `xml:"MetadataDirective,omitempty"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ CopySourceIfModifiedSince time.Time `xml:"CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince time.Time `xml:"CopySourceIfUnmodifiedSince,omitempty"`
+ CopySourceIfMatch []string `xml:"CopySourceIfMatch,omitempty"`
+ CopySourceIfNoneMatch []string `xml:"CopySourceIfNoneMatch,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *CopyObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T CopyObject
var layout struct {
*T
- CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
- CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.CopySourceIfModifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfModifiedSince)
@@ -66,9 +603,9 @@ func (t *CopyObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error
type T CopyObject
var overlay struct {
*T
- CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
- CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.CopySourceIfModifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfModifiedSince)
@@ -78,19 +615,19 @@ func (t *CopyObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error
}
type CopyObjectResponse struct {
- CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"`
+ CopyObjectResult CopyObjectResult `xml:"CopyObjectResult"`
}
type CopyObjectResult struct {
- LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
- ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
}
func (t *CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T CopyObjectResult
var layout struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
layout.T = (*T)(t)
layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
@@ -100,7 +637,7 @@ func (t *CopyObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
type T CopyObjectResult
var overlay struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
overlay.T = (*T)(t)
overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
@@ -108,18 +645,18 @@ func (t *CopyObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
}
type CreateBucket struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Bucket string `xml:"Bucket"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
}
func (t *CreateBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T CreateBucket
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -129,7 +666,7 @@ func (t *CreateBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
type T CreateBucket
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -137,30 +674,30 @@ func (t *CreateBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
}
type CreateBucketConfiguration struct {
- LocationConstraint string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
+ LocationConstraint string `xml:"LocationConstraint"`
}
type CreateBucketResponse struct {
- CreateBucketReturn CreateBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketReturn"`
+ CreateBucketReturn CreateBucketResult `xml:"CreateBucketReturn"`
}
type CreateBucketResult struct {
- BucketName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketName"`
+ BucketName string `xml:"BucketName"`
}
type DeleteBucket struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *DeleteBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T DeleteBucket
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -170,7 +707,7 @@ func (t *DeleteBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
type T DeleteBucket
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -178,22 +715,22 @@ func (t *DeleteBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
}
type DeleteBucketResponse struct {
- DeleteBucketResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteBucketResponse"`
+ DeleteBucketResponse Status `xml:"DeleteBucketResponse"`
}
type DeleteMarkerEntry struct {
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"`
- IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"`
- LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
- Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ IsLatest bool `xml:"IsLatest"`
+ LastModified time.Time `xml:"LastModified"`
+ Owner CanonicalUser `xml:"Owner,omitempty"`
}
func (t *DeleteMarkerEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T DeleteMarkerEntry
var layout struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
layout.T = (*T)(t)
layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
@@ -203,7 +740,7 @@ func (t *DeleteMarkerEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
type T DeleteMarkerEntry
var overlay struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
overlay.T = (*T)(t)
overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
@@ -211,19 +748,19 @@ func (t *DeleteMarkerEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
}
type DeleteObject struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *DeleteObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T DeleteObject
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -233,7 +770,7 @@ func (t *DeleteObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
type T DeleteObject
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -241,22 +778,22 @@ func (t *DeleteObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
}
type DeleteObjectResponse struct {
- DeleteObjectResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteObjectResponse"`
+ DeleteObjectResponse Status `xml:"DeleteObjectResponse"`
}
type GetBucketAccessControlPolicy struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *GetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T GetBucketAccessControlPolicy
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -266,7 +803,7 @@ func (t *GetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St
type T GetBucketAccessControlPolicy
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -274,22 +811,22 @@ func (t *GetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St
}
type GetBucketAccessControlPolicyResponse struct {
- GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketAccessControlPolicyResponse"`
+ GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"GetBucketAccessControlPolicyResponse"`
}
type GetBucketLoggingStatus struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *GetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T GetBucketLoggingStatus
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -299,7 +836,7 @@ func (t *GetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartEle
type T GetBucketLoggingStatus
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -307,26 +844,26 @@ func (t *GetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartEle
}
type GetBucketLoggingStatusResponse struct {
- GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketLoggingStatusResponse"`
+ GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"GetBucketLoggingStatusResponse"`
}
type GetObject struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"`
- GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"`
- InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ GetMetadata bool `xml:"GetMetadata"`
+ GetData bool `xml:"GetData"`
+ InlineData bool `xml:"InlineData"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *GetObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T GetObject
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -336,7 +873,7 @@ func (t *GetObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type T GetObject
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -344,19 +881,19 @@ func (t *GetObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
}
type GetObjectAccessControlPolicy struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *GetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T GetObjectAccessControlPolicy
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -366,7 +903,7 @@ func (t *GetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St
type T GetObjectAccessControlPolicy
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -374,35 +911,35 @@ func (t *GetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St
}
type GetObjectAccessControlPolicyResponse struct {
- GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectAccessControlPolicyResponse"`
+ GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"GetObjectAccessControlPolicyResponse"`
}
type GetObjectExtended struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"`
- GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"`
- InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"`
- ByteRangeStart int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeStart,omitempty"`
- ByteRangeEnd int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeEnd,omitempty"`
- IfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
- IfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
- IfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfMatch,omitempty"`
- IfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfNoneMatch,omitempty"`
- ReturnCompleteObjectOnConditionFailure bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ReturnCompleteObjectOnConditionFailure,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ GetMetadata bool `xml:"GetMetadata"`
+ GetData bool `xml:"GetData"`
+ InlineData bool `xml:"InlineData"`
+ ByteRangeStart int64 `xml:"ByteRangeStart,omitempty"`
+ ByteRangeEnd int64 `xml:"ByteRangeEnd,omitempty"`
+ IfModifiedSince time.Time `xml:"IfModifiedSince,omitempty"`
+ IfUnmodifiedSince time.Time `xml:"IfUnmodifiedSince,omitempty"`
+ IfMatch []string `xml:"IfMatch,omitempty"`
+ IfNoneMatch []string `xml:"IfNoneMatch,omitempty"`
+ ReturnCompleteObjectOnConditionFailure bool `xml:"ReturnCompleteObjectOnConditionFailure,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *GetObjectExtended) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T GetObjectExtended
var layout struct {
*T
- IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
- IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.IfModifiedSince = (*xsdDateTime)(&layout.T.IfModifiedSince)
@@ -414,9 +951,9 @@ func (t *GetObjectExtended) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
type T GetObjectExtended
var overlay struct {
*T
- IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
- IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.IfModifiedSince = (*xsdDateTime)(&overlay.T.IfModifiedSince)
@@ -426,27 +963,27 @@ func (t *GetObjectExtended) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
}
type GetObjectExtendedResponse struct {
- GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"`
+ GetObjectResponse GetObjectResult `xml:"GetObjectResponse"`
}
type GetObjectResponse struct {
- GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"`
+ GetObjectResponse GetObjectResult `xml:"GetObjectResponse"`
}
type GetObjectResult struct {
- Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
- Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
- LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
- ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
- Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"`
+ Status Status `xml:"Status"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ Data []byte `xml:"Data,omitempty"`
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
}
func (t *GetObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T GetObjectResult
var layout struct {
*T
- Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ Data *xsdBase64Binary `xml:"Data,omitempty"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
layout.T = (*T)(t)
layout.Data = (*xsdBase64Binary)(&layout.T.Data)
@@ -457,8 +994,8 @@ func (t *GetObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
type T GetObjectResult
var overlay struct {
*T
- Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ Data *xsdBase64Binary `xml:"Data,omitempty"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
overlay.T = (*T)(t)
overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
@@ -471,30 +1008,21 @@ type Grant struct {
Permission Permission `xml:"Permission"`
}
-type Grantee struct {
- XMLNS string `xml:"xmlns:xsi,attr"`
- XMLXSI string `xml:"xsi:type,attr"`
- Type string `xml:"Type"`
- ID string `xml:"ID,omitempty"`
- DisplayName string `xml:"DisplayName,omitempty"`
- URI string `xml:"URI,omitempty"`
-}
-
type Group struct {
- URI string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ URI"`
+ URI string `xml:"URI"`
}
type ListAllMyBuckets struct {
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
}
func (t *ListAllMyBuckets) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T ListAllMyBuckets
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -504,7 +1032,7 @@ func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
type T ListAllMyBuckets
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -542,26 +1070,31 @@ type ListAllMyBucketsList struct {
}
type ListAllMyBucketsResponse struct {
- ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResponse"`
+ ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"ListAllMyBucketsResponse"`
+}
+
+type ListAllMyBucketsResult struct {
+ Owner CanonicalUser `xml:"Owner"`
+ Buckets ListAllMyBucketsList `xml:"Buckets"`
}
type ListBucket struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix,omitempty"`
- Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker,omitempty"`
- MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys,omitempty"`
- Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Prefix string `xml:"Prefix,omitempty"`
+ Marker string `xml:"Marker,omitempty"`
+ MaxKeys int `xml:"MaxKeys,omitempty"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *ListBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T ListBucket
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -571,7 +1104,7 @@ func (t *ListBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error
type T ListBucket
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -579,11 +1112,10 @@ func (t *ListBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error
}
type ListBucketResponse struct {
- ListBucketResponse ListBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResponse"`
+ ListBucketResponse ListBucketResult `xml:"ListBucketResponse"`
}
type ListBucketResult struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
Metadata []MetadataEntry `xml:"Metadata,omitempty"`
Name string `xml:"Name"`
Prefix string `xml:"Prefix"`
@@ -594,6 +1126,7 @@ type ListBucketResult struct {
IsTruncated bool `xml:"IsTruncated"`
Contents []ListEntry `xml:"Contents,omitempty"`
CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
+ EncodingType string `xml:"EncodingType"`
}
type ListEntry struct {
@@ -627,48 +1160,44 @@ func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
}
type ListVersionsResponse struct {
- ListVersionsResponse ListVersionsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResponse"`
+ ListVersionsResponse ListVersionsResult `xml:"ListVersionsResponse"`
}
type ListVersionsResult struct {
- Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
- Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
- Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
- KeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ KeyMarker"`
- VersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionIdMarker"`
- NextKeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextKeyMarker,omitempty"`
- NextVersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextVersionIdMarker,omitempty"`
- MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"`
- Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
- IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"`
- Version VersionEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version"`
- DeleteMarker DeleteMarkerEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker"`
- CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"`
-}
-
-type LocationConstraint struct {
- LocationConstraint string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ KeyMarker string `xml:"KeyMarker"`
+ VersionIdMarker string `xml:"VersionIdMarker"`
+ NextKeyMarker string `xml:"NextKeyMarker,omitempty"`
+ NextVersionIdMarker string `xml:"NextVersionIdMarker,omitempty"`
+ MaxKeys int `xml:"MaxKeys"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ IsTruncated bool `xml:"IsTruncated"`
+ Version VersionEntry `xml:"Version,omitempty"`
+ DeleteMarker DeleteMarkerEntry `xml:"DeleteMarker,omitempty"`
+ CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"`
}
type LoggingSettings struct {
- TargetBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetBucket"`
- TargetPrefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetPrefix"`
- TargetGrants AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetGrants,omitempty"`
+ TargetBucket string `xml:"TargetBucket"`
+ TargetPrefix string `xml:"TargetPrefix"`
+ TargetGrants AccessControlList `xml:"TargetGrants,omitempty"`
}
// May be one of COPY, REPLACE
type MetadataDirective string
type MetadataEntry struct {
- Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
- Value string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Value"`
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
}
// May be one of Enabled, Disabled
type MfaDeleteStatus string
type NotificationConfiguration struct {
- TopicConfiguration []TopicConfiguration `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TopicConfiguration,omitempty"`
+ TopicConfiguration []TopicConfiguration `xml:"TopicConfiguration,omitempty"`
}
// May be one of BucketOwner, Requester
@@ -678,10 +1207,10 @@ type Payer string
type Permission string
type PostResponse struct {
- Location string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Location"`
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ Location string `xml:"Location"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ ETag string `xml:"ETag"`
}
type PrefixEntry struct {
@@ -689,23 +1218,23 @@ type PrefixEntry struct {
}
type PutObject struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
- ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"`
- AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
- StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ ContentLength int64 `xml:"ContentLength"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *PutObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T PutObject
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -715,7 +1244,7 @@ func (t *PutObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type T PutObject
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -723,25 +1252,25 @@ func (t *PutObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
}
type PutObjectInline struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
- Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
- ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"`
- AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
- StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ Metadata []MetadataEntry `xml:"Metadata,omitempty"`
+ Data []byte `xml:"Data"`
+ ContentLength int64 `xml:"ContentLength"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *PutObjectInline) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T PutObjectInline
var layout struct {
*T
- Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Data *xsdBase64Binary `xml:"Data"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Data = (*xsdBase64Binary)(&layout.T.Data)
@@ -752,8 +1281,8 @@ func (t *PutObjectInline) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
type T PutObjectInline
var overlay struct {
*T
- Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Data *xsdBase64Binary `xml:"Data"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
@@ -762,23 +1291,23 @@ func (t *PutObjectInline) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
}
type PutObjectInlineResponse struct {
- PutObjectInlineResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectInlineResponse"`
+ PutObjectInlineResponse PutObjectResult `xml:"PutObjectInlineResponse"`
}
type PutObjectResponse struct {
- PutObjectResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectResponse"`
+ PutObjectResponse PutObjectResult `xml:"PutObjectResponse"`
}
type PutObjectResult struct {
- ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
- LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"ETag"`
+ LastModified time.Time `xml:"LastModified"`
}
func (t *PutObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T PutObjectResult
var layout struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
layout.T = (*T)(t)
layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
@@ -788,7 +1317,7 @@ func (t *PutObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
type T PutObjectResult
var overlay struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
overlay.T = (*T)(t)
overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
@@ -796,27 +1325,27 @@ func (t *PutObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
}
type RequestPaymentConfiguration struct {
- Payer Payer `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Payer"`
+ Payer Payer `xml:"Payer"`
}
type Result struct {
- Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"`
+ Status Status `xml:"Status"`
}
type SetBucketAccessControlPolicy struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ AccessControlList AccessControlList `xml:"AccessControlList,omitempty"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *SetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T SetBucketAccessControlPolicy
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -826,7 +1355,7 @@ func (t *SetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St
type T SetBucketAccessControlPolicy
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -837,19 +1366,19 @@ type SetBucketAccessControlPolicyResponse struct {
}
type SetBucketLoggingStatus struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
- BucketLoggingStatus BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketLoggingStatus"`
+ Bucket string `xml:"Bucket"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
+ BucketLoggingStatus BucketLoggingStatus `xml:"BucketLoggingStatus"`
}
func (t *SetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T SetBucketLoggingStatus
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -859,7 +1388,7 @@ func (t *SetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartEle
type T SetBucketLoggingStatus
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -870,20 +1399,20 @@ type SetBucketLoggingStatusResponse struct {
}
type SetObjectAccessControlPolicy struct {
- Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
- AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
- Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
- Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
- Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ AccessControlList AccessControlList `xml:"AccessControlList"`
+ AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"Timestamp,omitempty"`
+ Signature string `xml:"Signature,omitempty"`
+ Credential string `xml:"Credential,omitempty"`
}
func (t *SetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T SetObjectAccessControlPolicy
var layout struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
layout.T = (*T)(t)
layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
@@ -893,7 +1422,7 @@ func (t *SetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St
type T SetObjectAccessControlPolicy
var overlay struct {
*T
- Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Timestamp *xsdDateTime `xml:"Timestamp,omitempty"`
}
overlay.T = (*T)(t)
overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
@@ -904,37 +1433,37 @@ type SetObjectAccessControlPolicyResponse struct {
}
type Status struct {
- Code int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Code"`
- Description string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Description"`
+ Code int `xml:"Code"`
+ Description string `xml:"Description"`
}
// May be one of STANDARD, REDUCED_REDUNDANCY, GLACIER, UNKNOWN
type StorageClass string
type TopicConfiguration struct {
- Topic string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Topic"`
- Event []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Event"`
+ Topic string `xml:"Topic"`
+ Event []string `xml:"Event"`
}
type User struct {
}
type VersionEntry struct {
- Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
- VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"`
- IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"`
- LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
- ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
- Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"`
- Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
- StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ IsLatest bool `xml:"IsLatest"`
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+ Size int64 `xml:"Size"`
+ Owner CanonicalUser `xml:"Owner,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass"`
}
func (t *VersionEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type T VersionEntry
var layout struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
layout.T = (*T)(t)
layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
@@ -944,7 +1473,7 @@ func (t *VersionEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
type T VersionEntry
var overlay struct {
*T
- LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ LastModified *xsdDateTime `xml:"LastModified"`
}
overlay.T = (*T)(t)
overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
@@ -952,8 +1481,8 @@ func (t *VersionEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
}
type VersioningConfiguration struct {
- Status VersioningStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status,omitempty"`
- MfaDelete MfaDeleteStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MfaDelete,omitempty"`
+ Status VersioningStatus `xml:"Status,omitempty"`
+ MfaDelete MfaDeleteStatus `xml:"MfaDelete,omitempty"`
}
// May be one of Enabled, Suspended
@@ -976,10 +1505,10 @@ func (b xsdBase64Binary) MarshalText() ([]byte, error) {
type xsdDateTime time.Time
func (t *xsdDateTime) UnmarshalText(text []byte) error {
- return _unmarshalTime(text, (*time.Time)(t), s3TimeFormat)
+ return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999")
}
func (t xsdDateTime) MarshalText() ([]byte, error) {
- return []byte((time.Time)(t).Format(s3TimeFormat)), nil
+ return _marshalTime((time.Time)(t), "2006-01-02T15:04:05.999999999")
}
func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if (time.Time)(t).IsZero() {
@@ -1006,3 +1535,6 @@ func _unmarshalTime(text []byte, t *time.Time, format string) (err error) {
}
return err
}
+func _marshalTime(t time.Time, format string) ([]byte, error) {
+ return []byte(t.Format(format + "Z07:00")), nil
+}
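
Side note on the new time handling: the generated marshaller now builds the layout inline and appends the zone suffix in _marshalTime. A standalone sketch of how Go's reference-time layout behaves with the "Z07:00" suffix (illustrative only, not the generated code):

package main

import (
    "fmt"
    "time"
)

func main() {
    const layout = "2006-01-02T15:04:05.999999999"
    utc := time.Date(2024, 3, 1, 12, 30, 45, 123456789, time.UTC)
    // "Z07:00" renders as a literal "Z" for UTC and as a numeric offset
    // otherwise, matching what _marshalTime appends above.
    fmt.Println(utc.Format(layout + "Z07:00"))
    // 2024-03-01T12:30:45.123456789Z
    cst := utc.In(time.FixedZone("CST", 8*3600))
    fmt.Println(cst.Format(layout + "Z07:00"))
    // 2024-03-01T20:30:45.123456789+08:00
}
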
diff --git a/weed/s3api/s3api_xsd_generated_helper.go b/weed/s3api/s3api_xsd_generated_helper.go
new file mode 100644
index 000000000..24cdd2289
--- /dev/null
+++ b/weed/s3api/s3api_xsd_generated_helper.go
@@ -0,0 +1,10 @@
+package s3api
+
+type Grantee struct {
+ XMLNS string `xml:"xmlns:xsi,attr"`
+ XMLXSI string `xml:"xsi:type,attr"`
+ Type string `xml:"Type"`
+ ID string `xml:"ID,omitempty"`
+ DisplayName string `xml:"DisplayName,omitempty"`
+ URI string `xml:"URI,omitempty"`
+}
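
The hand-written Grantee keeps the xmlns:xsi and xsi:type attribute tags literal instead of the namespace-expanded form used by the generated structs. A rough sketch of populating and marshalling it; the XMLName is added so it marshals standalone, and the sample values are made up:

package main

import (
    "encoding/xml"
    "fmt"
)

// Local mirror of the helper struct above, for illustration only.
type Grantee struct {
    XMLName     xml.Name `xml:"Grantee"`
    XMLNS       string   `xml:"xmlns:xsi,attr"`
    XMLXSI      string   `xml:"xsi:type,attr"`
    Type        string   `xml:"Type"`
    ID          string   `xml:"ID,omitempty"`
    DisplayName string   `xml:"DisplayName,omitempty"`
    URI         string   `xml:"URI,omitempty"`
}

func main() {
    g := Grantee{
        XMLNS:       "http://www.w3.org/2001/XMLSchema-instance",
        XMLXSI:      "CanonicalUser",
        Type:        "CanonicalUser",
        ID:          "some-user-id",
        DisplayName: "some-user",
    }
    out, _ := xml.MarshalIndent(g, "", "  ")
    fmt.Println(string(out))
}
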
diff --git a/weed/s3api/s3err/error_handler.go b/weed/s3api/s3err/error_handler.go
index 3fb04a313..466c0e61b 100644
--- a/weed/s3api/s3err/error_handler.go
+++ b/weed/s3api/s3err/error_handler.go
@@ -79,6 +79,7 @@ func setCommonHeaders(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Accept-Ranges", "bytes")
if r.Header.Get("Origin") != "" {
w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Expose-Headers", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
}
}
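
A quick httptest sketch of the added header's effect, assuming the handler behaves as in the hunk above: without Access-Control-Expose-Headers, cross-origin JavaScript cannot read non-simple response headers such as ETag.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

func main() {
    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.Header.Get("Origin") != "" {
            w.Header().Set("Access-Control-Allow-Origin", "*")
            // Without this, browsers hide headers like ETag from cross-origin JS.
            w.Header().Set("Access-Control-Expose-Headers", "*")
            w.Header().Set("Access-Control-Allow-Credentials", "true")
        }
    })
    req := httptest.NewRequest(http.MethodGet, "/", nil)
    req.Header.Set("Origin", "https://example.com")
    rec := httptest.NewRecorder()
    h.ServeHTTP(rec, req)
    fmt.Println(rec.Header().Get("Access-Control-Expose-Headers")) // *
}
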
diff --git a/weed/security/tls.go b/weed/security/tls.go
index ae6510219..1a9dfacb5 100644
--- a/weed/security/tls.go
+++ b/weed/security/tls.go
@@ -4,16 +4,17 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/credentials/tls/certprovider/pemfile"
- "google.golang.org/grpc/security/advancedtls"
"os"
+ "slices"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/credentials/tls/certprovider/pemfile"
+ "google.golang.org/grpc/security/advancedtls"
)
const CredRefreshingInterval = time.Duration(5) * time.Hour
@@ -54,7 +55,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption
}
// Start a server and create a client using advancedtls API with Provider.
- options := &advancedtls.ServerOptions{
+ options := &advancedtls.Options{
IdentityOptions: advancedtls.IdentityCertificateOptions{
IdentityProvider: serverIdentityProvider,
},
@@ -62,7 +63,22 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption
RootProvider: serverRootProvider,
},
RequireClientCert: true,
- VType: advancedtls.CertVerification,
+ VerificationType: advancedtls.CertVerification,
+ }
+ options.MinTLSVersion, err = TlsVersionByName(config.GetString("tls.min_version"))
+ if err != nil {
+ glog.Warningf("tls min version parse failed, %v", err)
+ return nil, nil
+ }
+ options.MaxTLSVersion, err = TlsVersionByName(config.GetString("tls.max_version"))
+ if err != nil {
+ glog.Warningf("tls max version parse failed, %v", err)
+ return nil, nil
+ }
+ options.CipherSuites, err = TlsCipherSuiteByNames(config.GetString("tls.cipher_suites"))
+ if err != nil {
+ glog.Warningf("tls cipher suite parse failed, %v", err)
+ return nil, nil
}
allowedCommonNames := config.GetString(component + ".allowed_commonNames")
allowedWildcardDomain := config.GetString("grpc.allowed_wildcard_domain")
@@ -75,10 +91,10 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption
AllowedCommonNames: allowedCommonNamesMap,
AllowedWildcardDomain: allowedWildcardDomain,
}
- options.VerifyPeer = auther.Authenticate
+ options.AdditionalPeerVerification = auther.Authenticate
} else {
- options.VerifyPeer = func(params *advancedtls.VerificationFuncParams) (*advancedtls.VerificationResults, error) {
- return &advancedtls.VerificationResults{}, nil
+ options.AdditionalPeerVerification = func(params *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
+ return &advancedtls.PostHandshakeVerificationResults{}, nil
}
}
ta, err := advancedtls.NewServerCreds(options)
@@ -118,17 +134,17 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption {
glog.Warningf("pemfile.NewProvider(%v) failed: %v", clientRootOptions, err)
return grpc.WithTransportCredentials(insecure.NewCredentials())
}
- options := &advancedtls.ClientOptions{
+ options := &advancedtls.Options{
IdentityOptions: advancedtls.IdentityCertificateOptions{
IdentityProvider: clientProvider,
},
- VerifyPeer: func(params *advancedtls.VerificationFuncParams) (*advancedtls.VerificationResults, error) {
- return &advancedtls.VerificationResults{}, nil
+ AdditionalPeerVerification: func(params *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
+ return &advancedtls.PostHandshakeVerificationResults{}, nil
},
RootOptions: advancedtls.RootCertificateOptions{
RootProvider: clientRootProvider,
},
- VType: advancedtls.CertVerification,
+ VerificationType: advancedtls.CertVerification,
}
ta, err := advancedtls.NewClientCreds(options)
if err != nil {
@@ -155,14 +171,68 @@ func LoadClientTLSHTTP(clientCertFile string) *tls.Config {
}
}
-func (a Authenticator) Authenticate(params *advancedtls.VerificationFuncParams) (*advancedtls.VerificationResults, error) {
+func (a Authenticator) Authenticate(params *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) {
if a.AllowedWildcardDomain != "" && strings.HasSuffix(params.Leaf.Subject.CommonName, a.AllowedWildcardDomain) {
- return &advancedtls.VerificationResults{}, nil
+ return &advancedtls.PostHandshakeVerificationResults{}, nil
}
if _, ok := a.AllowedCommonNames[params.Leaf.Subject.CommonName]; ok {
- return &advancedtls.VerificationResults{}, nil
+ return &advancedtls.PostHandshakeVerificationResults{}, nil
}
err := fmt.Errorf("Authenticate: invalid subject client common name: %s", params.Leaf.Subject.CommonName)
glog.Error(err)
return nil, err
}
+
+func FixTlsConfig(viper *util.ViperProxy, config *tls.Config) error {
+ var err error
+ config.MinVersion, err = TlsVersionByName(viper.GetString("tls.min_version"))
+ if err != nil {
+ return err
+ }
+ config.MaxVersion, err = TlsVersionByName(viper.GetString("tls.max_version"))
+ if err != nil {
+ return err
+ }
+ config.CipherSuites, err = TlsCipherSuiteByNames(viper.GetString("tls.cipher_suites"))
+ return err
+}
+
+func TlsVersionByName(name string) (uint16, error) {
+ switch name {
+ case "":
+ return 0, nil
+ case "SSLv3":
+ return tls.VersionSSL30, nil
+ case "TLS 1.0":
+ return tls.VersionTLS10, nil
+ case "TLS 1.1":
+ return tls.VersionTLS11, nil
+ case "TLS 1.2":
+ return tls.VersionTLS12, nil
+ case "TLS 1.3":
+ return tls.VersionTLS13, nil
+ default:
+ return 0, fmt.Errorf("invalid tls version %s", name)
+ }
+}
+
+func TlsCipherSuiteByNames(cipherSuiteNames string) ([]uint16, error) {
+ cipherSuiteNames = strings.TrimSpace(cipherSuiteNames)
+ if cipherSuiteNames == "" {
+ return nil, nil
+ }
+ names := strings.Split(cipherSuiteNames, ",")
+ cipherSuites := tls.CipherSuites()
+ cipherIds := make([]uint16, 0, len(names))
+ for _, name := range names {
+ name = strings.TrimSpace(name)
+ index := slices.IndexFunc(cipherSuites, func(suite *tls.CipherSuite) bool {
+ return name == suite.Name
+ })
+ if index == -1 {
+ return nil, fmt.Errorf("invalid tls cipher suite name %s", name)
+ }
+ cipherIds = append(cipherIds, cipherSuites[index].ID)
+ }
+ return cipherIds, nil
+}
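
A usage sketch for the two new exported helpers; the function names come from the diff, while the version and suite strings are example inputs:

package main

import (
    "crypto/tls"
    "fmt"
    "log"

    "github.com/seaweedfs/seaweedfs/weed/security"
)

func main() {
    cfg := &tls.Config{}
    var err error
    if cfg.MinVersion, err = security.TlsVersionByName("TLS 1.2"); err != nil {
        log.Fatal(err)
    }
    // The empty string maps to 0, leaving the bound unrestricted.
    if cfg.MaxVersion, err = security.TlsVersionByName(""); err != nil {
        log.Fatal(err)
    }
    cfg.CipherSuites, err = security.TlsCipherSuiteByNames(
        "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("min=%x suites=%v\n", cfg.MinVersion, cfg.CipherSuites)
}
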
diff --git a/weed/sequence/memory_sequencer.go b/weed/sequence/memory_sequencer.go
index 6e879bd79..92944266c 100644
--- a/weed/sequence/memory_sequencer.go
+++ b/weed/sequence/memory_sequencer.go
@@ -30,7 +30,3 @@ func (m *MemorySequencer) SetMax(seenValue uint64) {
m.counter = seenValue + 1
}
}
-
-func (m *MemorySequencer) Peek() uint64 {
- return m.counter
-}
diff --git a/weed/sequence/sequence.go b/weed/sequence/sequence.go
index 2258d001b..49de63b73 100644
--- a/weed/sequence/sequence.go
+++ b/weed/sequence/sequence.go
@@ -3,5 +3,4 @@ package sequence
type Sequencer interface {
NextFileId(count uint64) uint64
SetMax(uint64)
- Peek() uint64
}
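
With Peek dropped, a Sequencer needs only NextFileId and SetMax. A toy implementation against the trimmed interface (illustrative, not part of the codebase):

package sequence

import "sync"

// FixedStepSequencer is a toy Sequencer that hands out ids in blocks of `count`.
type FixedStepSequencer struct {
    mu      sync.Mutex
    counter uint64
}

func (s *FixedStepSequencer) NextFileId(count uint64) uint64 {
    s.mu.Lock()
    defer s.mu.Unlock()
    id := s.counter
    s.counter += count
    return id
}

func (s *FixedStepSequencer) SetMax(seenValue uint64) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.counter <= seenValue {
        s.counter = seenValue + 1
    }
}

var _ Sequencer = (*FixedStepSequencer)(nil) // compile-time interface check
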
diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go
index b63198349..05694f681 100644
--- a/weed/sequence/snowflake_sequencer.go
+++ b/weed/sequence/snowflake_sequencer.go
@@ -42,8 +42,3 @@ func (m *SnowflakeSequencer) NextFileId(count uint64) uint64 {
// ignore setmax as we are snowflake
func (m *SnowflakeSequencer) SetMax(seenValue uint64) {
}
-
-// return a new id as no Peek is stored
-func (m *SnowflakeSequencer) Peek() uint64 {
- return uint64(m.node.Generate().Int64())
-}
diff --git a/weed/server/common.go b/weed/server/common.go
index a7d67fb2e..e6f6cdb88 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -127,7 +127,7 @@ func debug(params ...interface{}) {
func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption) {
m := make(map[string]interface{})
- if r.Method != "POST" {
+ if r.Method != http.MethodPost {
writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!"))
return
}
@@ -181,7 +181,12 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn ope
PairMap: pu.PairMap,
Jwt: assignResult.Auth,
}
- uploadResult, err := operation.UploadData(pu.Data, uploadOption)
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ }
+ uploadResult, err := uploader.UploadData(pu.Data, uploadOption)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return
@@ -260,7 +265,7 @@ func handleStaticResources2(r *mux.Router) {
r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(http.FS(StaticFS))))
}
-func adjustPassthroughHeaders(w http.ResponseWriter, r *http.Request, filename string) {
+func AdjustPassthroughHeaders(w http.ResponseWriter, r *http.Request, filename string) {
for header, values := range r.Header {
if normalizedHeader, ok := s3_constants.PassThroughHeaders[strings.ToLower(header)]; ok {
w.Header()[normalizedHeader] = values
@@ -284,7 +289,7 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
}
}
-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, prepareWriteFn func(offset int64, size int64) (filer.DoStreamContent, error)) error {
+func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, prepareWriteFn func(offset int64, size int64) (filer.DoStreamContent, error)) error {
rangeReq := r.Header.Get("Range")
bufferedWriter := writePool.Get().(*bufio.Writer)
bufferedWriter.Reset(w)
@@ -297,14 +302,14 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
writeFn, err := prepareWriteFn(0, totalSize)
if err != nil {
- glog.Errorf("processRangeRequest: %v", err)
+ glog.Errorf("ProcessRangeRequest: %v", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("processRangeRequest: %v", err)
+ return fmt.Errorf("ProcessRangeRequest: %v", err)
}
if err = writeFn(bufferedWriter); err != nil {
- glog.Errorf("processRangeRequest: %v", err)
+ glog.Errorf("ProcessRangeRequest: %v", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("processRangeRequest: %v", err)
+ return fmt.Errorf("ProcessRangeRequest: %v", err)
}
return nil
}
@@ -313,9 +318,9 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
//mostly copy from src/pkg/net/http/fs.go
ranges, err := parseRange(rangeReq, totalSize)
if err != nil {
- glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err)
+ glog.Errorf("ProcessRangeRequest headers: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return fmt.Errorf("processRangeRequest header: %v", err)
+ return fmt.Errorf("ProcessRangeRequest header: %v", err)
}
if sumRangesSize(ranges) > totalSize {
// The total number of bytes in all the ranges
@@ -345,16 +350,16 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
writeFn, err := prepareWriteFn(ra.start, ra.length)
if err != nil {
- glog.Errorf("processRangeRequest range[0]: %+v err: %v", w.Header(), err)
+ glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("processRangeRequest: %v", err)
+ return fmt.Errorf("ProcessRangeRequest: %v", err)
}
w.WriteHeader(http.StatusPartialContent)
err = writeFn(bufferedWriter)
if err != nil {
- glog.Errorf("processRangeRequest range[0]: %+v err: %v", w.Header(), err)
+ glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err)
http.Error(w, err.Error(), http.StatusInternalServerError)
- return fmt.Errorf("processRangeRequest range[0]: %v", err)
+ return fmt.Errorf("ProcessRangeRequest range[0]: %v", err)
}
return nil
}
@@ -369,9 +374,9 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
}
writeFn, err := prepareWriteFn(ra.start, ra.length)
if err != nil {
- glog.Errorf("processRangeRequest range[%d] err: %v", i, err)
+ glog.Errorf("ProcessRangeRequest range[%d] err: %v", i, err)
http.Error(w, "Internal Error", http.StatusInternalServerError)
- return fmt.Errorf("processRangeRequest range[%d] err: %v", i, err)
+ return fmt.Errorf("ProcessRangeRequest range[%d] err: %v", i, err)
}
writeFnByRange[i] = writeFn
}
@@ -406,9 +411,9 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
}
w.WriteHeader(http.StatusPartialContent)
if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil {
- glog.Errorf("processRangeRequest err: %v", err)
+ glog.Errorf("ProcessRangeRequest err: %v", err)
http.Error(w, "Internal Error", http.StatusInternalServerError)
- return fmt.Errorf("processRangeRequest err: %v", err)
+ return fmt.Errorf("ProcessRangeRequest err: %v", err)
}
return nil
}
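
Since ProcessRangeRequest is now exported, other packages can reuse the Range parsing and multipart writing by supplying a prepareWriteFn. A minimal sketch, assuming filer.DoStreamContent is the func(io.Writer) error seen elsewhere in this diff and using a byte slice as a stand-in data source:

package example

import (
    "io"
    "net/http"

    "github.com/seaweedfs/seaweedfs/weed/filer"
    weed_server "github.com/seaweedfs/seaweedfs/weed/server"
)

// serveBlob streams a byte slice with full Range support by delegating to the
// exported helper; prepareWriteFn returns a writer closure per (offset, size).
func serveBlob(w http.ResponseWriter, r *http.Request, data []byte) error {
    return weed_server.ProcessRangeRequest(r, w, int64(len(data)), "application/octet-stream",
        func(offset int64, size int64) (filer.DoStreamContent, error) {
            return func(writer io.Writer) error {
                _, err := writer.Write(data[offset : offset+size])
                return err
            }, nil
        })
}
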
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index b9571710d..b1440c94f 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -291,7 +291,7 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr
glog.V(4).Infof("DeleteEntry %v", req)
- err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures)
+ err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures, req.IfNotModifiedAfter)
resp = &filer_pb.DeleteEntryResponse{}
if err != nil && err != filer_pb.ErrNotFound {
resp.Error = err.Error()
@@ -363,12 +363,7 @@ func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.Delet
glog.V(4).Infof("DeleteCollection %v", req)
- err = fs.filer.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
- _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
- Name: req.GetCollection(),
- })
- return err
- })
+ err = fs.filer.DoDeleteCollection(req.GetCollection())
return &filer_pb.DeleteCollectionResponse{}, err
}
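
DeleteEntryMetaAndData gains a trailing IfNotModifiedAfter argument; the other call sites in this diff pass 0, which appears to disable the check. A hedged client-side sketch (field names come from the request message above; the cutoff is presumably unix seconds):

package example

import (
    "context"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// deleteIfUntouched issues a delete that the filer may skip if the entry was
// modified after the cutoff; passing 0 would delete unconditionally, as the
// rename and HTTP delete paths in this diff do.
func deleteIfUntouched(ctx context.Context, client filer_pb.SeaweedFilerClient, dir, name string) error {
    _, err := client.DeleteEntry(ctx, &filer_pb.DeleteEntryRequest{
        Directory:          dir,
        Name:               name,
        IsDeleteData:       true,
        IfNotModifiedAfter: time.Now().Add(-time.Hour).Unix(), // assumed unit
    })
    return err
}
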
diff --git a/weed/server/filer_grpc_server_admin.go b/weed/server/filer_grpc_server_admin.go
index b4caaf4e2..8b4912258 100644
--- a/weed/server/filer_grpc_server_admin.go
+++ b/weed/server/filer_grpc_server_admin.go
@@ -96,6 +96,8 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
MetricsIntervalSec: int32(fs.metricsIntervalSec),
Version: util.Version(),
FilerGroup: fs.option.FilerGroup,
+ MajorVersion: util.MAJOR_VERSION,
+ MinorVersion: util.MINOR_VERSION,
}
glog.V(4).Infof("GetFilerConfiguration: %v", t)
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index 3acea6f14..db00dd496 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -203,7 +203,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
// delete old entry
ctx = context.WithValue(ctx, "OP", "MV")
- deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, signatures)
+ deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, signatures, 0)
if deleteErr != nil {
return deleteErr
}
diff --git a/weed/server/filer_grpc_server_traverse_meta.go b/weed/server/filer_grpc_server_traverse_meta.go
new file mode 100644
index 000000000..4a924f065
--- /dev/null
+++ b/weed/server/filer_grpc_server_traverse_meta.go
@@ -0,0 +1,84 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/filer"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ "github.com/seaweedfs/seaweedfs/weed/util"
+ "github.com/viant/ptrie"
+)
+
+func (fs *FilerServer) TraverseBfsMetadata(req *filer_pb.TraverseBfsMetadataRequest, stream filer_pb.SeaweedFiler_TraverseBfsMetadataServer) error {
+
+ glog.V(0).Infof("TraverseBfsMetadata %v", req)
+
+ excludedTrie := ptrie.New[bool]()
+ for _, excluded := range req.ExcludedPrefixes {
+ excludedTrie.Put([]byte(excluded), true)
+ }
+
+ ctx := stream.Context()
+
+ queue := util.NewQueue[*filer.Entry]()
+ dirEntry, err := fs.filer.FindEntry(ctx, util.FullPath(req.Directory))
+ if err != nil {
+ return fmt.Errorf("find dir %s: %v", req.Directory, err)
+ }
+ queue.Enqueue(dirEntry)
+
+ for item := queue.Dequeue(); item != nil; item = queue.Dequeue() {
+ if excludedTrie.MatchPrefix([]byte(item.FullPath), func(key []byte, value bool) bool {
+ return true
+ }) {
+ // println("excluded", item.FullPath)
+ continue
+ }
+ parent, _ := item.FullPath.DirAndName()
+ if err := stream.Send(&filer_pb.TraverseBfsMetadataResponse{
+ Directory: parent,
+ Entry: item.ToProtoEntry(),
+ }); err != nil {
+ return fmt.Errorf("send traverse bfs metadata response: %v", err)
+ }
+
+ if !item.IsDirectory() {
+ continue
+ }
+
+ if err := fs.iterateDirectory(ctx, item.FullPath, func(entry *filer.Entry) error {
+ queue.Enqueue(entry)
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (fs *FilerServer) iterateDirectory(ctx context.Context, dirPath util.FullPath, fn func(entry *filer.Entry) error) (err error) {
+ var lastFileName string
+ var listErr error
+ for {
+ var hasEntries bool
+ lastFileName, listErr = fs.filer.StreamListDirectoryEntries(ctx, dirPath, lastFileName, false, 1024, "", "", "", func(entry *filer.Entry) bool {
+ hasEntries = true
+ if fnErr := fn(entry); fnErr != nil {
+ err = fnErr
+ return false
+ }
+ return true
+ })
+ if listErr != nil {
+ return listErr
+ }
+ if err != nil {
+ return err
+ }
+ if !hasEntries {
+ return nil
+ }
+ }
+}
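
The traversal leans on util.NewQueue[*filer.Entry] for its BFS frontier, and the loop terminates when Dequeue returns nil. A minimal generic FIFO of the kind assumed here (a sketch; the real util.Queue may differ):

package example

// Queue is a minimal generic FIFO mirroring the Enqueue/Dequeue pairing used
// by TraverseBfsMetadata above. Dequeue returns the zero value (nil for
// pointer types) when empty, which is what ends the BFS loop.
type Queue[T any] struct {
    items []T
}

func NewQueue[T any]() *Queue[T] { return &Queue[T]{} }

func (q *Queue[T]) Enqueue(v T) { q.items = append(q.items, v) }

func (q *Queue[T]) Dequeue() (v T) {
    if len(q.items) == 0 {
        return v // zero value signals "empty"
    }
    v = q.items[0]
    q.items = q.items[1:]
    return v
}
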
diff --git a/weed/server/filer_grpc_server_traverse_meta_test.go b/weed/server/filer_grpc_server_traverse_meta_test.go
new file mode 100644
index 000000000..72f8a916e
--- /dev/null
+++ b/weed/server/filer_grpc_server_traverse_meta_test.go
@@ -0,0 +1,31 @@
+package weed_server
+
+import (
+ "github.com/stretchr/testify/assert"
+ "github.com/viant/ptrie"
+ "testing"
+)
+
+func TestPtrie(t *testing.T) {
+ b := []byte("/topics/abc/dev")
+ excludedTrie := ptrie.New[bool]()
+ excludedTrie.Put([]byte("/topics/abc/d"), true)
+ excludedTrie.Put([]byte("/topics/abc"), true)
+
+ assert.True(t, excludedTrie.MatchPrefix(b, func(key []byte, value bool) bool {
+ println("matched1", string(key))
+ return true
+ }))
+
+ assert.True(t, excludedTrie.MatchAll(b, func(key []byte, value bool) bool {
+ println("matched2", string(key))
+ return true
+ }))
+
+ assert.False(t, excludedTrie.MatchAll([]byte("/topics/ab"), func(key []byte, value bool) bool {
+ println("matched3", string(key))
+ return true
+ }))
+
+ assert.False(t, excludedTrie.Has(b))
+}
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 0b7254c0d..ee052579c 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -74,7 +74,6 @@ type FilerOption struct {
DiskType string
AllowedOrigins []string
ExposeDirectoryData bool
- JoinExistingFiler bool
}
type FilerServer struct {
@@ -198,12 +197,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
existingNodes := fs.filer.ListExistingPeerUpdates(context.Background())
startFromTime := time.Now().Add(-filer.LogFlushInterval)
- if option.JoinExistingFiler {
- startFromTime = time.Time{}
- }
if isFresh {
glog.V(0).Infof("%s bootstrap from peers %+v", option.Host, existingNodes)
- if err := fs.filer.MaybeBootstrapFromPeers(option.Host, existingNodes, startFromTime); err != nil {
+ if err := fs.filer.MaybeBootstrapFromOnePeer(option.Host, existingNodes, startFromTime); err != nil {
glog.Fatalf("%s bootstrap from %+v: %v", option.Host, existingNodes, err)
}
}
diff --git a/weed/server/filer_server_handlers_proxy.go b/weed/server/filer_server_handlers_proxy.go
index e04994569..c1a26ca11 100644
--- a/weed/server/filer_server_handlers_proxy.go
+++ b/weed/server/filer_server_handlers_proxy.go
@@ -3,24 +3,13 @@ package weed_server
import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/security"
- "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/mem"
"io"
"math/rand"
"net/http"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
-var (
- client *http.Client
-)
-
-func init() {
- client = &http.Client{Transport: &http.Transport{
- MaxIdleConns: 1024,
- MaxIdleConnsPerHost: 1024,
- }}
-}
-
func (fs *FilerServer) maybeAddVolumeJwtAuthorization(r *http.Request, fileId string, isWrite bool) {
encodedJwt := fs.maybeGetVolumeJwtAuthorizationToken(fileId, isWrite)
@@ -71,14 +60,14 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques
}
}
- proxyResponse, postErr := client.Do(proxyReq)
+ proxyResponse, postErr := util_http.GetGlobalHttpClient().Do(proxyReq)
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
w.WriteHeader(http.StatusInternalServerError)
return
}
- defer util.CloseResponse(proxyResponse)
+ defer util_http.CloseResponse(proxyResponse)
for k, v := range proxyResponse.Header {
w.Header()[k] = v
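
The per-file http.Client and its init() are replaced by a process-wide client from util/http. The underlying idea, in isolation: one shared transport means one connection pool to the volume servers rather than one per call site (a sketch, not the util_http implementation):

package example

import (
    "net/http"
    "sync"
)

var (
    clientOnce   sync.Once
    sharedClient *http.Client
)

// GetClient returns a lazily initialized, process-wide HTTP client so idle
// connections are actually reused across handlers.
func GetClient() *http.Client {
    clientOnce.Do(func() {
        sharedClient = &http.Client{Transport: &http.Transport{
            MaxIdleConns:        1024,
            MaxIdleConnsPerHost: 1024,
        }}
    })
    return sharedClient
}
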
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 123b7a494..a02e6c2c1 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -71,14 +71,14 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, entry *filer.Ent
ifModifiedSinceHeader := r.Header.Get("If-Modified-Since")
if ifNoneMatchETagHeader != "" {
if util.CanonicalizeETag(etag) == util.CanonicalizeETag(ifNoneMatchETagHeader) {
- setEtag(w, etag)
+ SetEtag(w, etag)
w.WriteHeader(http.StatusNotModified)
return true
}
} else if ifModifiedSinceHeader != "" {
if t, parseError := time.Parse(http.TimeFormat, ifModifiedSinceHeader); parseError == nil {
if !t.Before(entry.Attr.Mtime) {
- setEtag(w, etag)
+ SetEtag(w, etag)
w.WriteHeader(http.StatusNotModified)
return true
}
@@ -220,13 +220,13 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.Header().Set(s3_constants.AmzTagCount, strconv.Itoa(tagCount))
}
- setEtag(w, etag)
+ SetEtag(w, etag)
filename := entry.Name()
- adjustPassthroughHeaders(w, r, filename)
+ AdjustPassthroughHeaders(w, r, filename)
totalSize := int64(entry.Size())
- if r.Method == "HEAD" {
+ if r.Method == http.MethodHead {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
return
}
@@ -252,7 +252,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
- processRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) {
+ ProcessRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) {
if offset+size <= int64(len(entry.Content)) {
return func(writer io.Writer) error {
_, err := writer.Write(entry.Content[offset : offset+size])
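
checkPreconditions, shown in context above, implements the usual conditional-GET precedence: If-None-Match wins over If-Modified-Since. In miniature (the real code canonicalizes ETags before comparing):

package example

import (
    "net/http"
    "time"
)

// notModified reports whether a 304 can be served for the given etag/mtime.
func notModified(r *http.Request, etag string, mtime time.Time) bool {
    if inm := r.Header.Get("If-None-Match"); inm != "" {
        return inm == etag // the real code canonicalizes both sides first
    }
    if ims := r.Header.Get("If-Modified-Since"); ims != "" {
        if t, err := time.Parse(http.TimeFormat, ims); err == nil {
            return !t.Before(mtime)
        }
    }
    return false
}
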
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index 0531527bb..56f0f9cb4 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -31,8 +31,8 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
path = path[:len(path)-1]
}
- limit, limit_err := strconv.Atoi(r.FormValue("limit"))
- if limit_err != nil {
+ limit, limitErr := strconv.Atoi(r.FormValue("limit"))
+ if limitErr != nil {
limit = fs.option.DirListingLimit
}
@@ -62,6 +62,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
if r.Header.Get("Accept") == "application/json" {
writeJsonQuiet(w, r, http.StatusOK, struct {
+ Version string
Path string
Entries interface{}
Limit int
@@ -69,6 +70,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
ShouldDisplayLoadMore bool
EmptyFolder bool
}{
+ util.Version(),
path,
entries,
limit,
@@ -80,6 +82,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
}
err = ui.StatusTpl.Execute(w, struct {
+ Version string
Path string
Breadcrumbs []ui.Breadcrumb
Entries interface{}
@@ -89,6 +92,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
EmptyFolder bool
ShowDirectoryDelete bool
}{
+ util.Version(),
path,
ui.ToBreadcrumb(path),
entries,
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index b186fd34e..f0f756e34 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -18,6 +18,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@@ -120,7 +121,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, conte
fs.autoChunk(ctx, w, r, contentLength, so)
}
- util.CloseRequest(r)
+ util_http.CloseRequest(r)
}
@@ -211,7 +212,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
objectPath = objectPath[0 : len(objectPath)-1]
}
- err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil)
+ err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil, 0)
if err != nil {
if err == filer_pb.ErrNotFound {
writeJsonQuiet(w, r, http.StatusNoContent, nil)
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 2698e2209..1c7ed0c3c 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -39,7 +39,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
var reply *FilerPostResult
var err error
var md5bytes []byte
- if r.Method == "POST" {
+ if r.Method == http.MethodPost {
if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") {
reply, err = fs.mkdir(ctx, w, r, so)
} else {
@@ -148,6 +148,10 @@ func skipCheckParentDirEntry(r *http.Request) bool {
return r.URL.Query().Get("skipCheckParentDir") == "true"
}
+func isS3Request(r *http.Request) bool {
+ return r.Header.Get(s3_constants.AmzAuthType) != "" || r.Header.Get("X-Amz-Date") != ""
+}
+
func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
// detect file mode
@@ -266,7 +270,12 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
}
}
- if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r), so.MaxFileNameLength); dbErr != nil {
+ dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r), so.MaxFileNameLength)
+ // In test_bucket_listv2_delimiter_basic, the valid object key is the parent folder
+ if dbErr != nil && strings.HasSuffix(dbErr.Error(), " is a file") && isS3Request(r) {
+ dbErr = fs.filer.CreateEntry(ctx, entry, false, false, nil, true, so.MaxFileNameLength)
+ }
+ if dbErr != nil {
replyerr = dbErr
filerResult.Error = dbErr.Error()
glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
@@ -299,8 +308,14 @@ func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAs
PairMap: nil,
Jwt: auth,
}
+
+ uploader, uploaderErr := operation.NewUploader()
+ if uploaderErr != nil {
+ return uploaderErr
+ }
+
var uploadErr error
- uploadResult, uploadErr, _ = operation.Upload(reader, uploadOption)
+ uploadResult, uploadErr, _ = uploader.Upload(reader, uploadOption)
if uploadErr != nil {
return uploadErr
}
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
index 6cf7d65b1..f8d129bf3 100644
--- a/weed/server/filer_server_handlers_write_cipher.go
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -53,7 +53,13 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
PairMap: pu.PairMap,
Jwt: auth,
}
- uploadResult, uploadError := operation.UploadData(uncompressedData, uploadOption)
+
+ uploader, uploaderErr := operation.NewUploader()
+ if uploaderErr != nil {
+ return nil, fmt.Errorf("uploader initialization error: %v", uploaderErr)
+ }
+
+ uploadResult, uploadError := uploader.UploadData(uncompressedData, uploadOption)
if uploadError != nil {
return nil, fmt.Errorf("upload to volume server: %v", uploadError)
}
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index 8c8eba078..d0d1575cf 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -158,7 +158,13 @@ func (fs *FilerServer) doUpload(urlLocation string, limitedReader io.Reader, fil
PairMap: pairMap,
Jwt: auth,
}
- uploadResult, err, data := operation.Upload(limitedReader, uploadOption)
+
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ return nil, err, []byte{}
+ }
+
+ uploadResult, err, data := uploader.Upload(limitedReader, uploadOption)
if uploadResult != nil && uploadResult.RetryCount > 0 {
stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUploadRetry).Add(float64(uploadResult.RetryCount))
}
diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go
index abb6cce9a..638638196 100644
--- a/weed/server/filer_ui/breadcrumb.go
+++ b/weed/server/filer_ui/breadcrumb.go
@@ -13,6 +13,9 @@ type Breadcrumb struct {
func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) {
parts := strings.Split(fullpath, "/")
+ if fullpath == "/" {
+ parts = []string{""}
+ }
for i := 0; i < len(parts); i++ {
name := parts[i]
diff --git a/weed/server/filer_ui/breadcrumb_test.go b/weed/server/filer_ui/breadcrumb_test.go
new file mode 100644
index 000000000..6e42541cb
--- /dev/null
+++ b/weed/server/filer_ui/breadcrumb_test.go
@@ -0,0 +1,86 @@
+package filer_ui
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestToBreadcrumb(t *testing.T) {
+ type args struct {
+ fullpath string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantCrumbs []Breadcrumb
+ }{
+ {
+ name: "empty",
+ args: args{
+ fullpath: "",
+ },
+ wantCrumbs: []Breadcrumb{
+ {
+ Name: "/",
+ Link: "/",
+ },
+ },
+ },
+ {
+ name: "test1",
+ args: args{
+ fullpath: "/",
+ },
+ wantCrumbs: []Breadcrumb{
+ {
+ Name: "/",
+ Link: "/",
+ },
+ },
+ },
+ {
+ name: "test2",
+ args: args{
+ fullpath: "/abc",
+ },
+ wantCrumbs: []Breadcrumb{
+ {
+ Name: "/",
+ Link: "/",
+ },
+ {
+ Name: "abc",
+ Link: "/abc/",
+ },
+ },
+ },
+ {
+ name: "test3",
+ args: args{
+ fullpath: "/abc/def",
+ },
+ wantCrumbs: []Breadcrumb{
+ {
+ Name: "/",
+ Link: "/",
+ },
+ {
+ Name: "abc",
+ Link: "/abc/",
+ },
+ {
+ Name: "def",
+ Link: "/abc/def/",
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if gotCrumbs := ToBreadcrumb(tt.args.fullpath); !reflect.DeepEqual(gotCrumbs, tt.wantCrumbs) {
+ t.Errorf("ToBreadcrumb() = %v, want %v", gotCrumbs, tt.wantCrumbs)
+ }
+ })
+ }
+}
diff --git a/weed/server/filer_ui/filer.html b/weed/server/filer_ui/filer.html
index 28425f180..627f3ba77 100644
--- a/weed/server/filer_ui/filer.html
+++ b/weed/server/filer_ui/filer.html
@@ -1,7 +1,7 @@
<!DOCTYPE html>
<html>
<head>
- <title>SeaweedFS Filer</title>
+ <title>SeaweedFS Filer {{ .Version }}</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="/seaweedfsstatic/bootstrap/3.3.1/css/bootstrap.min.css">
<style>
@@ -82,7 +82,7 @@
<div class="page-header">
<h1>
<a href="https://github.com/seaweedfs/seaweedfs"><img src="/seaweedfsstatic/seaweed50x50.png"></img></a>
- SeaweedFS Filer
+ SeaweedFS Filer <small>{{ .Version }}</small>
</h1>
</div>
<div class="row">
@@ -99,7 +99,7 @@
{{ range $entry := .Breadcrumbs }}
<li><a href="{{ printpath $entry.Link }}">
{{ $entry.Name }}
- </li></a>
+ </a></li>
{{ end }}
</ol>
</div>
diff --git a/weed/server/master_grpc_server_assign.go b/weed/server/master_grpc_server_assign.go
index 7455c9ea4..4f95b4ff6 100644
--- a/weed/server/master_grpc_server_assign.go
+++ b/weed/server/master_grpc_server_assign.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/stats"
"time"
"github.com/seaweedfs/raft"
@@ -69,7 +70,12 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
MemoryMapMaxSizeMb: req.MemoryMapMaxSizeMb,
}
+ if !ms.Topo.DataCenterExists(option.DataCenter) {
+ return nil, fmt.Errorf("data center %v not found in topology", option.DataCenter)
+ }
+
vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
+ vl.SetLastGrowCount(req.WritableVolumeCount)
var (
lastErr error
@@ -80,18 +86,17 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
for time.Now().Sub(startTime) < maxTimeout {
fid, count, dnList, shouldGrow, err := ms.Topo.PickForWrite(req.Count, option, vl)
if shouldGrow && !vl.HasGrowRequest() {
- // if picked volume is almost full, trigger a volume-grow request
- if ms.Topo.AvailableSpaceFor(option) <= 0 {
- return nil, fmt.Errorf("no free volumes left for " + option.String())
+ if err != nil && ms.Topo.AvailableSpaceFor(option) <= 0 {
+ err = fmt.Errorf("%s and no free volumes left for %s", err.Error(), option.String())
}
vl.AddGrowRequest()
ms.volumeGrowthRequestChan <- &topology.VolumeGrowRequest{
Option: option,
- Count: int(req.WritableVolumeCount),
+ Count: req.WritableVolumeCount,
}
}
if err != nil {
- // glog.Warningf("PickForWrite %+v: %v", req, err)
+ stats.MasterPickForWriteErrorCounter.Inc()
lastErr = err
time.Sleep(200 * time.Millisecond)
continue
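
The shouldGrow branch no longer aborts when space runs out; it folds that condition into the returned error and still queues a grow request, guarded so each volume layout has at most one in flight. The gating pattern in isolation (a sketch; the real VolumeLayout keeps its own bookkeeping):

package example

// growGate sketches the HasGrowRequest/AddGrowRequest/DoneGrowRequest gating:
// at most one outstanding grow request per layout, delivered over a channel
// to a single background grower.
type growGate struct {
    pending chan struct{} // capacity 1
    reqs    chan<- string // stand-in for ms.volumeGrowthRequestChan
}

func newGrowGate(reqs chan<- string) *growGate {
    return &growGate{pending: make(chan struct{}, 1), reqs: reqs}
}

func (g *growGate) requestGrow(option string) {
    select {
    case g.pending <- struct{}{}: // acquired: nothing in flight
        g.reqs <- option
    default: // a grow request is already queued; drop this one
    }
}

func (g *growGate) doneGrow() {
    select {
    case <-g.pending:
    default:
    }
}
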
diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go
index 503da7fd4..3cad627db 100644
--- a/weed/server/master_grpc_server_volume.go
+++ b/weed/server/master_grpc_server_volume.go
@@ -3,11 +3,14 @@ package weed_server
import (
"context"
"fmt"
+ "math/rand"
"reflect"
"strings"
"sync"
"time"
+ "github.com/seaweedfs/seaweedfs/weed/topology"
+
"github.com/seaweedfs/raft"
"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -18,8 +21,39 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/types"
)
+func (ms *MasterServer) DoAutomaticVolumeGrow(req *topology.VolumeGrowRequest) {
+ glog.V(1).Infoln("starting automatic volume grow")
+ start := time.Now()
+ newVidLocations, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
+ glog.V(1).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start))
+ if err != nil {
+ glog.V(1).Infof("automatic volume grow failed: %+v", err)
+ return
+ }
+ for _, newVidLocation := range newVidLocations {
+ ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: newVidLocation})
+ }
+}
+
func (ms *MasterServer) ProcessGrowRequest() {
go func() {
+ for {
+ time.Sleep(14*time.Minute + time.Duration(120*rand.Float32())*time.Second)
+ if !ms.Topo.IsLeader() {
+ continue
+ }
+ for _, vl := range ms.Topo.ListVolumeLayouts() {
+ if !vl.HasGrowRequest() && vl.ShouldGrowVolumes(&topology.VolumeGrowOption{}) {
+ vl.AddGrowRequest()
+ ms.volumeGrowthRequestChan <- &topology.VolumeGrowRequest{
+ Option: vl.ToGrowOption(),
+ Count: vl.GetLastGrowCount(),
+ }
+ }
+ }
+ }
+ }()
+ go func() {
filter := sync.Map{}
for {
req, ok := <-ms.volumeGrowthRequestChan
@@ -27,9 +61,13 @@ func (ms *MasterServer) ProcessGrowRequest() {
break
}
+ option := req.Option
+ vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
+
if !ms.Topo.IsLeader() {
//discard buffered requests
time.Sleep(time.Second * 1)
+ vl.DoneGrowRequest()
continue
}
@@ -42,28 +80,15 @@ func (ms *MasterServer) ProcessGrowRequest() {
return !found
})
- option := req.Option
- vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
-
// not atomic but it's okay
if !found && vl.ShouldGrowVolumes(option) {
filter.Store(req, nil)
// we have lock called inside vg
- go func() {
- glog.V(1).Infoln("starting automatic volume grow")
- start := time.Now()
- newVidLocations, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
- glog.V(1).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start))
- if err == nil {
- for _, newVidLocation := range newVidLocations {
- ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: newVidLocation})
- }
- }
+ go func(req *topology.VolumeGrowRequest, vl *topology.VolumeLayout) {
+ ms.DoAutomaticVolumeGrow(req)
vl.DoneGrowRequest()
-
filter.Delete(req)
- }()
-
+ }(req, vl)
} else {
glog.V(4).Infoln("discard volume grow request")
time.Sleep(time.Millisecond * 211)
@@ -91,6 +116,7 @@ func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupV
Url: loc.Url,
PublicUrl: loc.PublicUrl,
DataCenter: loc.DataCenter,
+ GrpcPort: uint32(loc.GrpcPort),
})
}
var auth string
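
The first new goroutine is a jittered maintenance loop: roughly every 14 to 16 minutes the leader rescans all volume layouts and re-queues growth where needed. The timing pattern on its own (sketch):

package example

import (
    "math/rand"
    "time"
)

// jitteredLoop runs fn every base period plus up to `jitter` of random delay,
// so multiple masters (or restarts) do not scan in lockstep.
func jitteredLoop(base, jitter time.Duration, fn func()) {
    for {
        time.Sleep(base + time.Duration(rand.Float64()*float64(jitter)))
        fn()
    }
}

// Usage mirroring the diff: jitteredLoop(14*time.Minute, 2*time.Minute, scanLayouts)
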
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index 3499a2e13..44a1664c0 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -30,6 +30,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/topology"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
const (
@@ -92,15 +93,15 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.Se
v.SetDefault("master.replication.treat_replication_as_minimums", false)
replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums")
- v.SetDefault("master.volume_growth.copy_1", 7)
- v.SetDefault("master.volume_growth.copy_2", 6)
- v.SetDefault("master.volume_growth.copy_3", 3)
- v.SetDefault("master.volume_growth.copy_other", 1)
- v.SetDefault("master.volume_growth.threshold", 0.9)
- topology.VolumeGrowStrategy.Copy1Count = v.GetInt("master.volume_growth.copy_1")
- topology.VolumeGrowStrategy.Copy2Count = v.GetInt("master.volume_growth.copy_2")
- topology.VolumeGrowStrategy.Copy3Count = v.GetInt("master.volume_growth.copy_3")
- topology.VolumeGrowStrategy.CopyOtherCount = v.GetInt("master.volume_growth.copy_other")
+ v.SetDefault("master.volume_growth.copy_1", topology.VolumeGrowStrategy.Copy1Count)
+ v.SetDefault("master.volume_growth.copy_2", topology.VolumeGrowStrategy.Copy2Count)
+ v.SetDefault("master.volume_growth.copy_3", topology.VolumeGrowStrategy.Copy3Count)
+ v.SetDefault("master.volume_growth.copy_other", topology.VolumeGrowStrategy.CopyOtherCount)
+ v.SetDefault("master.volume_growth.threshold", topology.VolumeGrowStrategy.Threshold)
+ topology.VolumeGrowStrategy.Copy1Count = v.GetUint32("master.volume_growth.copy_1")
+ topology.VolumeGrowStrategy.Copy2Count = v.GetUint32("master.volume_growth.copy_2")
+ topology.VolumeGrowStrategy.Copy3Count = v.GetUint32("master.volume_growth.copy_3")
+ topology.VolumeGrowStrategy.CopyOtherCount = v.GetUint32("master.volume_growth.copy_other")
topology.VolumeGrowStrategy.Threshold = v.GetFloat64("master.volume_growth.threshold")
var preallocateSize int64
@@ -185,22 +186,7 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
raftServerName = fmt.Sprintf("[%s]", ms.Topo.RaftServer.Name())
} else if raftServer.RaftHashicorp != nil {
ms.Topo.HashicorpRaft = raftServer.RaftHashicorp
- leaderCh := raftServer.RaftHashicorp.LeaderCh()
- prevLeader, _ := ms.Topo.HashicorpRaft.LeaderWithID()
raftServerName = ms.Topo.HashicorpRaft.String()
- go func() {
- for {
- select {
- case isLeader := <-leaderCh:
- ms.Topo.RaftServerAccessLock.RLock()
- leader, _ := ms.Topo.HashicorpRaft.LeaderWithID()
- ms.Topo.RaftServerAccessLock.RUnlock()
- glog.V(0).Infof("is leader %+v change event: %+v => %+v", isLeader, prevLeader, leader)
- stats.MasterLeaderChangeCounter.WithLabelValues(fmt.Sprintf("%+v", leader)).Inc()
- prevLeader = leader
- }
- }
- }()
}
ms.Topo.RaftServerAccessLock.Unlock()
@@ -256,7 +242,7 @@ func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc {
}
director(req)
}
- proxy.Transport = util.Transport
+ proxy.Transport = util_http.GetGlobalHttpClient().GetClientTransport()
proxy.ServeHTTP(w, r)
}
}
diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go
index e4188420d..5e17bcca8 100644
--- a/weed/server/master_server_handlers.go
+++ b/weed/server/master_server_handlers.go
@@ -2,12 +2,13 @@ package weed_server
import (
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/glog"
"net/http"
"strconv"
"strings"
"time"
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/stats"
@@ -74,7 +75,10 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo
machines := ms.Topo.Lookup(collection, volumeId)
for _, loc := range machines {
locations = append(locations, operation.Location{
- Url: loc.Url(), PublicUrl: loc.PublicUrl, DataCenter: loc.GetDataCenterId(),
+ Url: loc.Url(),
+ PublicUrl: loc.PublicUrl,
+ DataCenter: loc.GetDataCenterId(),
+ GrpcPort: loc.GrpcPort,
})
}
}
@@ -82,7 +86,10 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo
machines, getVidLocationsErr := ms.MasterClient.GetVidLocations(vid)
for _, loc := range machines {
locations = append(locations, operation.Location{
- Url: loc.Url, PublicUrl: loc.PublicUrl, DataCenter: loc.DataCenter,
+ Url: loc.Url,
+ PublicUrl: loc.PublicUrl,
+ DataCenter: loc.DataCenter,
+ GrpcPort: loc.GrpcPort,
})
}
err = getVidLocationsErr
@@ -107,7 +114,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
requestedCount = 1
}
- writableVolumeCount, e := strconv.Atoi(r.FormValue("writableVolumeCount"))
+ writableVolumeCount, e := strconv.ParseUint(r.FormValue("writableVolumeCount"), 10, 32)
if e != nil {
writableVolumeCount = 0
}
@@ -126,23 +133,28 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
startTime = time.Now()
)
+ if !ms.Topo.DataCenterExists(option.DataCenter) {
+ writeJsonQuiet(w, r, http.StatusBadRequest, operation.AssignResult{
+ Error: fmt.Sprintf("data center %v not found in topology", option.DataCenter),
+ })
+ return
+ }
+
for time.Now().Sub(startTime) < maxTimeout {
fid, count, dnList, shouldGrow, err := ms.Topo.PickForWrite(requestedCount, option, vl)
if shouldGrow && !vl.HasGrowRequest() {
- // if picked volume is almost full, trigger a volume-grow request
glog.V(0).Infof("dirAssign volume growth %v from %v", option.String(), r.RemoteAddr)
- if ms.Topo.AvailableSpaceFor(option) <= 0 {
- writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left for " + option.String()})
- return
+ if err != nil && ms.Topo.AvailableSpaceFor(option) <= 0 {
+ err = fmt.Errorf("%s and no free volumes left for %s", err.Error(), option.String())
}
vl.AddGrowRequest()
ms.volumeGrowthRequestChan <- &topology.VolumeGrowRequest{
Option: option,
- Count: writableVolumeCount,
+ Count: uint32(writableVolumeCount),
}
}
if err != nil {
- // glog.Warningf("PickForWrite %+v: %v", req, err)
+ stats.MasterPickForWriteErrorCounter.Inc()
lastErr = err
time.Sleep(200 * time.Millisecond)
continue
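Note on the ParseUint change above: parse failures (including a missing field) fall back to zero instead of aborting the request. A minimal standalone sketch of that pattern, with a hypothetical handler helper that is not the SeaweedFS code itself:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// parseCount reads an optional numeric form value and clamps it to uint32
// range, returning 0 when the field is missing or malformed, which matches
// the "default to zero" behavior the handler above relies on.
func parseCount(r *http.Request, field string) uint32 {
	v, err := strconv.ParseUint(r.FormValue(field), 10, 32)
	if err != nil {
		return 0
	}
	return uint32(v)
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "/dir/assign?writableVolumeCount=3", nil)
	fmt.Println(parseCount(r, "writableVolumeCount")) // 3
	fmt.Println(parseCount(r, "missing"))             // 0
}
```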
diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go
index f40b819af..7479b5535 100644
--- a/weed/server/master_server_handlers_admin.go
+++ b/weed/server/master_server_handlers_admin.go
@@ -18,6 +18,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/topology"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) {
@@ -70,7 +71,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque
}
func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request) {
- count := 0
+ count := uint64(0)
option, err := ms.getVolumeGrowOption(r)
if err != nil {
writeJsonError(w, r, http.StatusNotAcceptable, err)
@@ -78,13 +79,16 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
}
glog.V(0).Infof("volumeGrowHandler received %v from %v", option.String(), r.RemoteAddr)
- if count, err = strconv.Atoi(r.FormValue("count")); err == nil {
- if ms.Topo.AvailableSpaceFor(option) < int64(count*option.ReplicaPlacement.GetCopyCount()) {
- err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.AvailableSpaceFor(option), count*option.ReplicaPlacement.GetCopyCount())
+ if count, err = strconv.ParseUint(r.FormValue("count"), 10, 32); err == nil {
+ replicaCount := int64(count * uint64(option.ReplicaPlacement.GetCopyCount()))
+ if ms.Topo.AvailableSpaceFor(option) < replicaCount {
+ err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.AvailableSpaceFor(option), replicaCount)
+ } else if !ms.Topo.DataCenterExists(option.DataCenter) {
+ err = fmt.Errorf("data center %v not found in topology", option.DataCenter)
} else {
var newVidLocations []*master_pb.VolumeLocation
- newVidLocations, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo)
- count = len(newVidLocations)
+ newVidLocations, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, uint32(count), option, ms.Topo)
+ count = uint64(len(newVidLocations))
}
} else {
err = fmt.Errorf("can not parse parameter count %s", r.FormValue("count"))
@@ -110,11 +114,11 @@ func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request)
location := ms.findVolumeLocation(collection, vid)
if location.Error == "" {
loc := location.Locations[rand.Intn(len(location.Locations))]
- var url string
+ url, _ := util_http.NormalizeUrl(loc.PublicUrl)
if r.URL.RawQuery != "" {
- url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path + "?" + r.URL.RawQuery
+ url = url + r.URL.Path + "?" + r.URL.RawQuery
} else {
- url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path
+ url = url + r.URL.Path
}
http.Redirect(w, r, url, http.StatusPermanentRedirect)
} else {
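The redirect handler now takes a `NormalizeUrl` that returns `(string, error)` rather than a bare string. A sketch of that shape under the assumption that normalization just ensures a scheme is present; the real `util_http` implementation may differ:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeUrl prepends a scheme when the location string lacks one, so that
// "host:8080" and "http://host:8080" both become usable base URLs.
func normalizeUrl(url string) (string, error) {
	if url == "" {
		return "", fmt.Errorf("empty url")
	}
	if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
		return url, nil
	}
	return "http://" + url, nil
}

func main() {
	u, _ := normalizeUrl("volume1:8080")
	fmt.Println(u + "/3,0142cbb8a7") // http://volume1:8080/3,0142cbb8a7
}
```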
diff --git a/weed/server/raft_hashicorp.go b/weed/server/raft_hashicorp.go
index d06066b93..299df323a 100644
--- a/weed/server/raft_hashicorp.go
+++ b/weed/server/raft_hashicorp.go
@@ -5,6 +5,14 @@ package weed_server
import (
"fmt"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
transport "github.com/Jille/raft-grpc-transport"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
@@ -14,13 +22,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/stats"
"google.golang.org/grpc"
- "math/rand"
- "os"
- "path"
- "path/filepath"
- "sort"
- "strings"
- "time"
)
const (
@@ -56,46 +57,61 @@ func (s *RaftServer) AddPeersConfiguration() (cfg raft.Configuration) {
return cfg
}
-func (s *RaftServer) UpdatePeers() {
+func (s *RaftServer) monitorLeaderLoop(updatePeers bool) {
for {
+ prevLeader, _ := s.RaftHashicorp.LeaderWithID()
select {
case isLeader := <-s.RaftHashicorp.LeaderCh():
+ leader, _ := s.RaftHashicorp.LeaderWithID()
if isLeader {
- peerLeader := string(s.serverAddr)
- existsPeerName := make(map[string]bool)
- for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
- if string(server.ID) == peerLeader {
- continue
- }
- existsPeerName[string(server.ID)] = true
- }
- for _, peer := range s.peers {
- peerName := string(peer)
- if peerName == peerLeader || existsPeerName[peerName] {
- continue
- }
- glog.V(0).Infof("adding new peer: %s", peerName)
- s.RaftHashicorp.AddVoter(
- raft.ServerID(peerName), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
- }
- for peer := range existsPeerName {
- if _, found := s.peers[peer]; !found {
- glog.V(0).Infof("removing old peer: %s", peer)
- s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
- }
- }
- if _, found := s.peers[peerLeader]; !found {
- glog.V(0).Infof("removing old leader peer: %s", peerLeader)
- s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
+
+ if updatePeers {
+ s.updatePeers()
+ updatePeers = false
}
+
+ s.topo.DoBarrier()
+
+ stats.MasterLeaderChangeCounter.WithLabelValues(fmt.Sprintf("%+v", leader)).Inc()
+ } else {
+ s.topo.BarrierReset()
}
- return
- case <-time.After(updatePeersTimeout):
- return
+ glog.V(0).Infof("is leader %+v change event: %+v => %+v", isLeader, prevLeader, leader)
+ prevLeader = leader
}
}
}
+func (s *RaftServer) updatePeers() {
+ peerLeader := string(s.serverAddr)
+ existsPeerName := make(map[string]bool)
+ for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
+ if string(server.ID) == peerLeader {
+ continue
+ }
+ existsPeerName[string(server.ID)] = true
+ }
+ for _, peer := range s.peers {
+ peerName := string(peer)
+ if peerName == peerLeader || existsPeerName[peerName] {
+ continue
+ }
+ glog.V(0).Infof("adding new peer: %s", peerName)
+ s.RaftHashicorp.AddVoter(
+ raft.ServerID(peerName), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
+ }
+ for peer := range existsPeerName {
+ if _, found := s.peers[peer]; !found {
+ glog.V(0).Infof("removing old peer: %s", peer)
+ s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
+ }
+ }
+ if _, found := s.peers[peerLeader]; !found {
+ glog.V(0).Infof("removing old leader peer: %s", peerLeader)
+ s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
+ }
+}
+
func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
s := &RaftServer{
peers: option.Peers,
@@ -157,6 +173,8 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
if err != nil {
return nil, fmt.Errorf("raft.NewRaft: %v", err)
}
+
+ updatePeers := false
if option.RaftBootstrap || len(s.RaftHashicorp.GetConfiguration().Configuration().Servers) == 0 {
cfg := s.AddPeersConfiguration()
// Need to get lock, in case all servers do this at the same time.
@@ -169,9 +187,11 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
}
} else {
- go s.UpdatePeers()
+ updatePeers = true
}
+ go s.monitorLeaderLoop(updatePeers)
+
ticker := time.NewTicker(c.HeartbeatTimeout * 10)
if glog.V(4) {
go func() {
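The refactor above replaces the one-shot `UpdatePeers` with a loop that keeps consuming leadership events and runs peer reconciliation only on the first election. A miniature of that loop; `leaderCh` stands in for raft's `LeaderCh()` and everything here is illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// monitorLeader keeps reading leadership change events instead of returning
// after the first one, and runs one-time setup only on the first election.
func monitorLeader(leaderCh <-chan bool, firstTimeSetup func()) {
	setupDone := false
	for isLeader := range leaderCh {
		if isLeader {
			if !setupDone {
				firstTimeSetup()
				setupDone = true
			}
			fmt.Println("became leader")
		} else {
			fmt.Println("lost leadership")
		}
	}
}

func main() {
	ch := make(chan bool)
	go monitorLeader(ch, func() { fmt.Println("updating peers once") })
	ch <- true
	ch <- false
	ch <- true
	time.Sleep(100 * time.Millisecond)
}
```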
diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go
index d718ecac7..4bcd808c2 100644
--- a/weed/server/raft_server.go
+++ b/weed/server/raft_server.go
@@ -2,13 +2,14 @@ package weed_server
import (
"encoding/json"
- transport "github.com/Jille/raft-grpc-transport"
"io"
"math/rand"
"os"
"path"
"time"
+ transport "github.com/Jille/raft-grpc-transport"
+
"google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/pb"
diff --git a/weed/server/volume_grpc_remote.go b/weed/server/volume_grpc_remote.go
index 64254b3b8..4452e019b 100644
--- a/weed/server/volume_grpc_remote.go
+++ b/weed/server/volume_grpc_remote.go
@@ -70,10 +70,15 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
PairMap: nil,
Jwt: security.EncodedJwt(req.Auth),
}
- if _, replicaWriteErr := operation.UploadData(data, uploadOption); replicaWriteErr != nil {
- if err == nil {
- err = fmt.Errorf("remote write needle %d size %d: %v", req.NeedleId, req.Size, err)
- }
+
+ uploader, uploaderErr := operation.NewUploader()
+	if uploaderErr != nil {
+		if err == nil {
+			err = fmt.Errorf("remote write needle %d size %d: %v", req.NeedleId, req.Size, uploaderErr)
+		}
+		return
+	}
+
+ if _, replicaWriteErr := uploader.UploadData(data, uploadOption); replicaWriteErr != nil && err == nil {
+ err = fmt.Errorf("remote write needle %d size %d: %v", req.NeedleId, req.Size, replicaWriteErr)
}
}(replica.Url)
}
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index cc364513b..15d639f49 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -27,6 +27,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
@@ -81,10 +82,11 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
if vs.ReadMode == "proxy" {
// proxy client request to target server
- u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].Url))
+ rawURL, _ := util_http.NormalizeUrl(lookupResult.Locations[0].Url)
+ u, _ := url.Parse(rawURL)
r.URL.Host = u.Host
r.URL.Scheme = u.Scheme
- request, err := http.NewRequest("GET", r.URL.String(), nil)
+ request, err := http.NewRequest(http.MethodGet, r.URL.String(), nil)
if err != nil {
glog.V(0).Infof("failed to instance http request of url %s: %v", r.URL.String(), err)
InternalError(w)
@@ -96,13 +98,13 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
- response, err := client.Do(request)
+ response, err := util_http.GetGlobalHttpClient().Do(request)
if err != nil {
glog.V(0).Infof("request remote url %s: %v", r.URL.String(), err)
InternalError(w)
return
}
- defer util.CloseResponse(response)
+ defer util_http.CloseResponse(response)
// proxy target response to client
for k, vv := range response.Header {
for _, v := range vv {
@@ -116,7 +118,8 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
return
} else {
// redirect
- u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))
+ rawURL, _ := util_http.NormalizeUrl(lookupResult.Locations[0].PublicUrl)
+ u, _ := url.Parse(rawURL)
u.Path = fmt.Sprintf("%s/%s,%s", u.Path, vid, fid)
arg := url.Values{}
if c := r.FormValue("collection"); c != "" {
@@ -186,7 +189,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusNotModified)
return
}
- setEtag(w, n.Etag())
+ SetEtag(w, n.Etag())
if n.HasPairs() {
pairMap := make(map[string]string)
@@ -253,7 +256,7 @@ func shouldAttemptStreamWrite(hasLocalVolume bool, ext string, r *http.Request)
if len(ext) > 0 {
ext = strings.ToLower(ext)
}
- if r.Method == "HEAD" {
+ if r.Method == http.MethodHead {
return true, true
}
_, _, _, shouldResize := shouldResizeImages(ext, r)
@@ -377,14 +380,14 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
}
w.Header().Set("Accept-Ranges", "bytes")
- adjustPassthroughHeaders(w, r, filename)
+ AdjustPassthroughHeaders(w, r, filename)
- if r.Method == "HEAD" {
+ if r.Method == http.MethodHead {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
return nil
}
- return processRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) {
+ return ProcessRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) {
return func(writer io.Writer) error {
if _, e = rs.Seek(offset, 0); e != nil {
return e
@@ -406,14 +409,14 @@ func (vs *VolumeServer) streamWriteResponseContent(filename string, mimeType str
w.Header().Set("Content-Type", mimeType)
}
w.Header().Set("Accept-Ranges", "bytes")
- adjustPassthroughHeaders(w, r, filename)
+ AdjustPassthroughHeaders(w, r, filename)
- if r.Method == "HEAD" {
+ if r.Method == http.MethodHead {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
return
}
- processRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) {
+ ProcessRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) {
return func(writer io.Writer) error {
return vs.store.ReadVolumeNeedleDataInto(volumeId, n, readOption, writer, offset, size)
}, nil
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index 6e151bf80..7f0fcc871 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -53,7 +53,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
// http 204 status code does not allow body
if writeError == nil && isUnchanged {
- setEtag(w, reqNeedle.Etag())
+ SetEtag(w, reqNeedle.Etag())
w.WriteHeader(http.StatusNoContent)
return
}
@@ -65,7 +65,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
ret.Size = uint32(originalSize)
ret.ETag = reqNeedle.Etag()
ret.Mime = string(reqNeedle.Mime)
- setEtag(w, ret.ETag)
+ SetEtag(w, ret.ETag)
w.Header().Set("Content-MD5", contentMd5)
writeJsonQuiet(w, r, httpStatus, ret)
}
@@ -147,7 +147,7 @@ func writeDeleteResult(err error, count int64, w http.ResponseWriter, r *http.Re
}
}
-func setEtag(w http.ResponseWriter, etag string) {
+func SetEtag(w http.ResponseWriter, etag string) {
if etag != "" {
if strings.HasPrefix(etag, "\"") {
w.Header().Set("ETag", etag)
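The newly exported `SetEtag` passes an already-quoted ETag through and otherwise quotes it per RFC 7232. A sketch of the same contract; details of the real function may differ beyond what the hunk shows:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// setEtag writes an ETag header: pre-quoted values go through untouched,
// bare values are wrapped in quotes, and empty values set nothing.
func setEtag(w http.ResponseWriter, etag string) {
	if etag == "" {
		return
	}
	if etag[0] == '"' {
		w.Header().Set("ETag", etag)
		return
	}
	w.Header().Set("ETag", `"`+etag+`"`)
}

func main() {
	rec := httptest.NewRecorder()
	setEtag(rec, "1a2b3c")
	fmt.Println(rec.Header().Get("ETag")) // "1a2b3c"
}
```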
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 97d51dad7..dbe6dfed5 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -99,6 +99,7 @@ type FileInfo struct {
modifiedTime time.Time
etag string
isDirectory bool
+ err error
}
func (fi *FileInfo) Name() string { return fi.name }
@@ -109,6 +110,9 @@ func (fi *FileInfo) IsDir() bool { return fi.isDirectory }
func (fi *FileInfo) Sys() interface{} { return nil }
func (fi *FileInfo) ETag(ctx context.Context) (string, error) {
+ if fi.err != nil {
+ return "", fi.err
+ }
return fi.etag, nil
}
@@ -269,7 +273,10 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f
fi, err := fs.stat(ctx, fullFilePath)
if err != nil {
- return nil, os.ErrNotExist
+ if err == os.ErrNotExist {
+ return nil, err
+ }
+ return &WebDavFile{fs: fs}, nil
}
if !strings.HasSuffix(fullFilePath, "/") && fi.IsDir() {
fullFilePath += "/"
@@ -365,12 +372,16 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F
var fi FileInfo
entry, err := filer_pb.GetEntry(fs, fullpath)
+ if err != nil {
+ if err == filer_pb.ErrNotFound {
+ return nil, os.ErrNotExist
+ }
+ fi.err = err
+ return &fi, nil
+ }
if entry == nil {
return nil, os.ErrNotExist
}
- if err != nil {
- return nil, err
- }
fi.size = int64(filer.FileSize(entry))
fi.name = string(fullpath)
fi.mode = os.FileMode(entry.Attributes.FileMode)
@@ -392,8 +403,13 @@ func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo,
}
func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
+ uploader, uploaderErr := operation.NewUploader()
+ if uploaderErr != nil {
+ glog.V(0).Infof("upload data %v: %v", f.name, uploaderErr)
+ return nil, fmt.Errorf("upload data: %v", uploaderErr)
+ }
- fileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
+ fileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
f.fs,
&filer_pb.AssignVolumeRequest{
Count: 1,
@@ -509,7 +525,9 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
func (f *WebDavFile) Close() error {
glog.V(2).Infof("WebDavFileSystem.Close %v", f.name)
-
+ if f.bufWriter == nil {
+ return nil
+ }
err := f.bufWriter.Close()
if f.entry != nil {
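The WebDAV change stores a transient lookup error inside `FileInfo` and surfaces it later from `ETag()` instead of failing `OpenFile` outright. A self-contained sketch of that deferred-error pattern, with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"
)

// fileInfo carries a deferred error: stat can still return an info object,
// and the stored err only surfaces when a property like ETag is requested.
type fileInfo struct {
	name string
	err  error
}

func (fi *fileInfo) Name() string       { return fi.name }
func (fi *fileInfo) Size() int64        { return 0 }
func (fi *fileInfo) Mode() os.FileMode  { return 0 }
func (fi *fileInfo) ModTime() time.Time { return time.Time{} }
func (fi *fileInfo) IsDir() bool        { return false }
func (fi *fileInfo) Sys() interface{}   { return nil }

func (fi *fileInfo) ETag(ctx context.Context) (string, error) {
	if fi.err != nil {
		return "", fi.err
	}
	return "etag", nil
}

func main() {
	fi := &fileInfo{name: "/a.txt", err: fmt.Errorf("filer temporarily unreachable")}
	if _, err := fi.ETag(context.Background()); err != nil {
		fmt.Println("ETag deferred error:", err)
	}
}
```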
diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go
index 17ba63cfe..217e5750e 100644
--- a/weed/shell/command_ec_balance.go
+++ b/weed/shell/command_ec_balance.go
@@ -455,16 +455,20 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
sortEcNodesByFreeslotsDescending(possibleDestinationEcNodes)
-
+ skipReason := ""
for _, destEcNode := range possibleDestinationEcNodes {
+
if destEcNode.info.Id == existingLocation.info.Id {
continue
}
if destEcNode.freeEcSlot <= 0 {
+ skipReason += fmt.Sprintf(" Skipping %s because it has no free slots\n", destEcNode.info.Id)
continue
}
if findEcVolumeShards(destEcNode, vid).ShardIdCount() >= averageShardsPerEcNode {
+ skipReason += fmt.Sprintf(" Skipping %s because it %d >= avernageShards (%d)\n",
+ destEcNode.info.Id, findEcVolumeShards(destEcNode, vid).ShardIdCount(), averageShardsPerEcNode)
continue
}
@@ -477,7 +481,7 @@ func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode
return nil
}
-
+ fmt.Printf("WARNING: Could not find suitable taget node for %d.%d:\n%s", vid, shardId, skipReason)
return nil
}
diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go
index 2131c5649..a4dfac67c 100644
--- a/weed/shell/command_ec_rebuild.go
+++ b/weed/shell/command_ec_rebuild.go
@@ -224,7 +224,7 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
Collection: collection,
ShardIds: []uint32{uint32(shardId)},
CopyEcxFile: needEcxFile,
- CopyEcjFile: needEcxFile,
+ CopyEcjFile: true,
CopyVifFile: needEcxFile,
SourceDataNode: ecNodes[0].info.Id,
})
diff --git a/weed/shell/command_fs_merge_volumes.go b/weed/shell/command_fs_merge_volumes.go
index 4a6048a43..b77feb8e3 100644
--- a/weed/shell/command_fs_merge_volumes.go
+++ b/weed/shell/command_fs_merge_volumes.go
@@ -19,14 +19,10 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
-)
-
-var (
- client *http.Client
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func init() {
- client = &http.Client{}
Commands = append(Commands, &commandFsMergeVolumes{})
}
@@ -104,7 +100,7 @@ func (c *commandFsMergeVolumes) Do(args []string, commandEnv *CommandEnv, writer
return nil
}
- defer client.CloseIdleConnections()
+ defer util_http.GetGlobalHttpClient().CloseIdleConnections()
return commandEnv.WithFilerClient(false, func(filerClient filer_pb.SeaweedFilerClient) error {
return filer_pb.TraverseBfs(commandEnv, util.FullPath(dir), func(parentPath util.FullPath, entry *filer_pb.Entry) {
@@ -304,7 +300,7 @@ func moveChunk(chunk *filer_pb.FileChunk, toVolumeId needle.VolumeId, masterClie
if err != nil {
return err
}
- defer util.CloseResponse(resp)
+ defer util_http.CloseResponse(resp)
defer reader.Close()
var filename string
@@ -322,7 +318,12 @@ func moveChunk(chunk *filer_pb.FileChunk, toVolumeId needle.VolumeId, masterClie
isCompressed := resp.Header.Get("Content-Encoding") == "gzip"
md5 := resp.Header.Get("Content-MD5")
- _, err, _ = operation.Upload(reader, &operation.UploadOption{
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ return err
+ }
+
+ _, err, _ = uploader.Upload(reader, &operation.UploadOption{
UploadUrl: uploadURL,
Filename: filename,
IsInputCompressed: isCompressed,
@@ -342,18 +343,18 @@ func moveChunk(chunk *filer_pb.FileChunk, toVolumeId needle.VolumeId, masterClie
func readUrl(fileUrl string) (*http.Response, io.ReadCloser, error) {
- req, err := http.NewRequest("GET", fileUrl, nil)
+ req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
if err != nil {
return nil, nil, err
}
req.Header.Add("Accept-Encoding", "gzip")
- r, err := client.Do(req)
+ r, err := util_http.GetGlobalHttpClient().Do(req)
if err != nil {
return nil, nil, err
}
if r.StatusCode >= 400 {
- util.CloseResponse(r)
+ util_http.CloseResponse(r)
return nil, nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
}
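The `readUrl` helper above requests gzip transfer and closes the response on error statuses so the shared client can reuse the connection. A standalone sketch of that pattern using the default client for illustration:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// readUrl fetches a chunk URL, asking for gzip transfer; on a 4xx/5xx status
// it drains and closes the body before returning so the underlying
// connection stays reusable.
func readUrl(fileUrl string) (*http.Response, io.ReadCloser, error) {
	req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
	if err != nil {
		return nil, nil, err
	}
	req.Header.Add("Accept-Encoding", "gzip")

	r, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, nil, err
	}
	if r.StatusCode >= 400 {
		io.Copy(io.Discard, r.Body) // drain so the connection is reusable
		r.Body.Close()
		return nil, nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
	}
	return r, r.Body, nil
}

func main() {
	if _, body, err := readUrl("http://localhost:8080/3,01637037d6"); err == nil {
		defer body.Close()
		fmt.Println("fetched chunk")
	}
}
```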
diff --git a/weed/shell/command_fs_verify.go b/weed/shell/command_fs_verify.go
index 32d498202..47052cca0 100644
--- a/weed/shell/command_fs_verify.go
+++ b/weed/shell/command_fs_verify.go
@@ -1,6 +1,7 @@
package shell
import (
+ "bytes"
"context"
"flag"
"fmt"
@@ -30,6 +31,7 @@ type commandFsVerify struct {
volumeServers []pb.ServerAddress
volumeIds map[uint32][]pb.ServerAddress
verbose *bool
+ metadataFromLog *bool
concurrency *int
modifyTimeAgoAtSec int64
writer io.Writer
@@ -56,7 +58,7 @@ func (c *commandFsVerify) Do(args []string, commandEnv *CommandEnv, writer io.Wr
c.verbose = fsVerifyCommand.Bool("v", false, "print out each processed files")
modifyTimeAgo := fsVerifyCommand.Duration("modifyTimeAgo", 0, "only include files after this modify time to verify")
c.concurrency = fsVerifyCommand.Int("concurrency", 0, "number of parallel verification per volume server")
-
+	c.metadataFromLog = fsVerifyCommand.Bool("metadataFromLog", false, "use the filer log to get metadata")
if err = fsVerifyCommand.Parse(args); err != nil {
return err
}
@@ -88,14 +90,19 @@ func (c *commandFsVerify) Do(args []string, commandEnv *CommandEnv, writer io.Wr
defer close(c.waitChan[volumeServerStr])
}
}
-
- fCount, eConut, terr := c.verifyTraverseBfs(path)
- if terr == nil {
- fmt.Fprintf(writer, "verified %d files, error %d files \n", fCount, eConut)
+ var fCount, eCount uint64
+ if *c.metadataFromLog {
+ var wg sync.WaitGroup
+ fCount, eCount, err = c.verifyProcessMetadata(path, &wg)
+ wg.Wait()
+ if err != nil {
+ return err
+ }
+ } else {
+ fCount, eCount, err = c.verifyTraverseBfs(path)
}
-
- return terr
-
+ fmt.Fprintf(writer, "verified %d files, error %d files \n", fCount, eCount)
+ return err
}
func (c *commandFsVerify) collectVolumeIds() error {
@@ -117,7 +124,7 @@ func (c *commandFsVerify) collectVolumeIds() error {
return nil
}
-func (c *commandFsVerify) verifyEntry(volumeServer pb.ServerAddress, fileId *filer_pb.FileId) error {
+func (c *commandFsVerify) verifyChunk(volumeServer pb.ServerAddress, fileId *filer_pb.FileId) error {
err := operation.WithVolumeServerClient(false, volumeServer, c.env.option.GrpcDialOption,
func(client volume_server_pb.VolumeServerClient) error {
_, err := client.VolumeNeedleStatus(context.Background(),
@@ -138,6 +145,126 @@ type ItemEntry struct {
path util.FullPath
}
+func (c *commandFsVerify) verifyProcessMetadata(path string, wg *sync.WaitGroup) (fileCount uint64, errCount uint64, err error) {
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+ if resp.EventNotification.NewEntry == nil {
+ return nil
+ }
+ chunkCount := len(message.NewEntry.Chunks)
+ if chunkCount == 0 {
+ return nil
+ }
+ entryPath := fmt.Sprintf("%s/%s", message.NewParentPath, message.NewEntry.Name)
+ errorChunksCount := atomic.NewUint64(0)
+ if !c.verifyEntry(entryPath, message.NewEntry.Chunks, errorChunksCount, wg) {
+ if err = c.env.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+ entryResp, errReq := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
+ Directory: message.NewParentPath,
+ Name: message.NewEntry.Name,
+ })
+ if errReq != nil {
+ if strings.HasSuffix(errReq.Error(), "no entry is found in filer store") {
+ return nil
+ }
+ return errReq
+ }
+ if entryResp.Entry.Attributes.Mtime == message.NewEntry.Attributes.Mtime &&
+ bytes.Equal(entryResp.Entry.Attributes.Md5, message.NewEntry.Attributes.Md5) {
+ fmt.Fprintf(c.writer, "file: %s needles:%d failed:%d\n", entryPath, chunkCount, errorChunksCount.Load())
+ errCount++
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ return nil
+ }
+ if *c.verbose {
+ fmt.Fprintf(c.writer, "file: %s needles:%d verifed\n", entryPath, chunkCount)
+ }
+ fileCount++
+ return nil
+ }
+ metadataFollowOption := &pb.MetadataFollowOption{
+ ClientName: "shell_verify",
+ ClientId: util.RandomInt32(),
+ ClientEpoch: 0,
+ SelfSignature: 0,
+ PathPrefix: path,
+ AdditionalPathPrefixes: nil,
+ DirectoriesToWatch: nil,
+ StartTsNs: time.Now().Add(-1 * time.Second * time.Duration(c.modifyTimeAgoAtSec)).UnixNano(),
+ StopTsNs: time.Now().UnixNano(),
+ EventErrorType: pb.DontLogError,
+ }
+ return fileCount, errCount, pb.FollowMetadata(c.env.option.FilerAddress, c.env.option.GrpcDialOption, metadataFollowOption, processEventFn)
+}
+
+func (c *commandFsVerify) verifyEntry(path string, chunks []*filer_pb.FileChunk, errorCount *atomic.Uint64, wg *sync.WaitGroup) bool {
+ fileMsg := fmt.Sprintf("file:%s", path)
+	itemIsVerified := atomic.NewBool(true)
+ for _, chunk := range chunks {
+ if volumeIds, ok := c.volumeIds[chunk.Fid.VolumeId]; ok {
+ for _, volumeServer := range volumeIds {
+ if *c.concurrency == 0 {
+ if err := c.verifyChunk(volumeServer, chunk.Fid); err != nil {
+ if !(*c.metadataFromLog && strings.HasSuffix(err.Error(), "not found")) {
+ fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
+ fileMsg, chunk.GetFileIdString(), err)
+ }
+					if itemIsVerified.Load() {
+						itemIsVerified.Store(false)
+ errorCount.Add(1)
+ }
+ }
+ continue
+ }
+ c.waitChanLock.RLock()
+ waitChan, ok := c.waitChan[string(volumeServer)]
+ c.waitChanLock.RUnlock()
+ if !ok {
+ fmt.Fprintf(c.writer, "%s failed to get channel for %s fileId: %s\n",
+ string(volumeServer), fileMsg, chunk.GetFileIdString())
+				if itemIsVerified.Load() {
+					itemIsVerified.Store(false)
+ errorCount.Add(1)
+ }
+ continue
+ }
+ wg.Add(1)
+ waitChan <- struct{}{}
+ go func(fChunk *filer_pb.FileChunk, path string, volumeServer pb.ServerAddress, msg string) {
+ defer wg.Done()
+ if err := c.verifyChunk(volumeServer, fChunk.Fid); err != nil {
+ if !(*c.metadataFromLog && strings.HasSuffix(err.Error(), "not found")) {
+ fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
+ msg, fChunk.GetFileIdString(), err)
+ }
+					if itemIsVerified.Load() {
+						itemIsVerified.Store(false)
+ errorCount.Add(1)
+ }
+ }
+ <-waitChan
+ }(chunk, path, volumeServer, fileMsg)
+ }
+ } else {
+ if !*c.metadataFromLog {
+ err := fmt.Errorf("volumeId %d not found", chunk.Fid.VolumeId)
+ fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
+ fileMsg, chunk.GetFileIdString(), err)
+ }
+			if itemIsVerified.Load() {
+				itemIsVerified.Store(false)
+ errorCount.Add(1)
+ }
+ break
+ }
+ }
+	return itemIsVerified.Load()
+}
+
func (c *commandFsVerify) verifyTraverseBfs(path string) (fileCount uint64, errCount uint64, err error) {
timeNowAtSec := time.Now().Unix()
return fileCount, errCount, doTraverseBfsAndSaving(c.env, c.writer, path, false,
@@ -166,63 +293,9 @@ func (c *commandFsVerify) verifyTraverseBfs(path string) (fileCount uint64, errC
for itemEntry := range outputChan {
i := itemEntry.(*ItemEntry)
itemPath := string(i.path)
- fileMsg := fmt.Sprintf("file:%s", itemPath)
- itemIsVerifed := atomic.NewBool(true)
- for _, chunk := range i.chunks {
- if volumeIds, ok := c.volumeIds[chunk.Fid.VolumeId]; ok {
- for _, volumeServer := range volumeIds {
- if *c.concurrency == 0 {
- if err = c.verifyEntry(volumeServer, chunk.Fid); err != nil {
- fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
- fileMsg, chunk.GetFileIdString(), err)
- if itemIsVerifed.Load() {
- itemIsVerifed.Store(false)
- itemErrCount.Add(1)
- }
- }
- continue
- }
- c.waitChanLock.RLock()
- waitChan, ok := c.waitChan[string(volumeServer)]
- c.waitChanLock.RUnlock()
- if !ok {
- fmt.Fprintf(c.writer, "%s failed to get channel for %s fileId: %s: %+v\n",
- string(volumeServer), fileMsg, chunk.GetFileIdString(), err)
- if itemIsVerifed.Load() {
- itemIsVerifed.Store(false)
- itemErrCount.Add(1)
- }
- continue
- }
- wg.Add(1)
- waitChan <- struct{}{}
- go func(fChunk *filer_pb.FileChunk, path string, volumeServer pb.ServerAddress, msg string) {
- defer wg.Done()
- if err = c.verifyEntry(volumeServer, fChunk.Fid); err != nil {
- fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
- msg, fChunk.GetFileIdString(), err)
- if itemIsVerifed.Load() {
- itemIsVerifed.Store(false)
- itemErrCount.Add(1)
- }
- }
- <-waitChan
- }(chunk, itemPath, volumeServer, fileMsg)
- }
- } else {
- err = fmt.Errorf("volumeId %d not found", chunk.Fid.VolumeId)
- fmt.Fprintf(c.writer, "%s failed verify fileId %s: %+v\n",
- fileMsg, chunk.GetFileIdString(), err)
- if itemIsVerifed.Load() {
- itemIsVerifed.Store(false)
- itemErrCount.Add(1)
- }
- break
- }
- }
- if itemIsVerifed.Load() {
+ if c.verifyEntry(itemPath, i.chunks, itemErrCount, &wg) {
if *c.verbose {
- fmt.Fprintf(c.writer, "%s needles:%d verifed\n", fileMsg, len(i.chunks))
+ fmt.Fprintf(c.writer, "file: %s needles:%d verifed\n", itemPath, len(i.chunks))
}
fileCount++
}
diff --git a/weed/shell/command_remote_uncache.go b/weed/shell/command_remote_uncache.go
index 34269ce4e..25e51ff74 100644
--- a/weed/shell/command_remote_uncache.go
+++ b/weed/shell/command_remote_uncache.go
@@ -7,6 +7,7 @@ import (
"io"
"path/filepath"
"strings"
+ "time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -164,12 +165,12 @@ func (ff *FileFilter) matches(entry *filer_pb.Entry) bool {
}
}
if *ff.minAge != -1 {
- if entry.Attributes.Crtime < *ff.minAge {
+ if entry.Attributes.Crtime + *ff.minAge > time.Now().Unix() {
return false
}
}
if *ff.maxAge != -1 {
- if entry.Attributes.Crtime > *ff.maxAge {
+ if entry.Attributes.Crtime + *ff.maxAge < time.Now().Unix() {
return false
}
}
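The corrected minAge/maxAge checks treat the flags as ages relative to now rather than as raw timestamps. The arithmetic in isolation, with illustrative values:

```go
package main

import (
	"fmt"
	"time"
)

// matchesAge checks a creation time against optional min/max age bounds in
// seconds (-1 disables a bound): an entry younger than minAge, or older than
// maxAge, is filtered out, matching the corrected comparisons above.
func matchesAge(crtime, minAge, maxAge int64) bool {
	now := time.Now().Unix()
	if minAge != -1 && crtime+minAge > now {
		return false // not old enough yet
	}
	if maxAge != -1 && crtime+maxAge < now {
		return false // too old
	}
	return true
}

func main() {
	created := time.Now().Add(-2 * time.Hour).Unix()
	fmt.Println(matchesAge(created, 3600, -1))  // true: older than 1h
	fmt.Println(matchesAge(created, 10800, -1)) // false: younger than 3h
}
```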
diff --git a/weed/shell/command_s3_bucket_quota_check.go b/weed/shell/command_s3_bucket_quota_check.go
index bc0d838f7..b130e4fad 100644
--- a/weed/shell/command_s3_bucket_quota_check.go
+++ b/weed/shell/command_s3_bucket_quota_check.go
@@ -130,7 +130,7 @@ func (c *commandS3BucketQuotaEnforce) processEachBucket(fc *filer.FilerConf, fil
} else {
fmt.Fprintf(writer, " changing bucket %s to writable.\n", entry.Name)
}
- fc.AddLocationConf(locConf)
+ fc.SetLocationConf(locConf)
}
return
diff --git a/weed/shell/command_s3_clean_uploads.go b/weed/shell/command_s3_clean_uploads.go
index 2be61f72a..accce60ba 100644
--- a/weed/shell/command_s3_clean_uploads.go
+++ b/weed/shell/command_s3_clean_uploads.go
@@ -12,6 +12,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func init() {
@@ -90,7 +91,7 @@ func (c *commandS3CleanUploads) cleanupUploads(commandEnv *CommandEnv, writer io
deleteUrl := fmt.Sprintf("http://%s%s/%s?recursive=true&ignoreRecursiveError=true", commandEnv.option.FilerAddress.ToHttpAddress(), uploadsDir, staleUpload)
fmt.Fprintf(writer, "purge %s\n", deleteUrl)
- err = util.Delete(deleteUrl, string(encodedJwt))
+ err = util_http.Delete(deleteUrl, string(encodedJwt))
if err != nil && err.Error() != "" {
return fmt.Errorf("purge %s/%s: %v", uploadsDir, staleUpload, err)
}
diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go
index 3e2512bdd..0e76f6ac9 100644
--- a/weed/shell/command_volume_check_disk.go
+++ b/weed/shell/command_volume_check_disk.go
@@ -279,7 +279,7 @@ func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *Vo
fmt.Fprintf(writer, "delete %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
}
}
- deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(
+ deleteResults, deleteErr := operation.DeleteFileIdsAtOneVolumeServer(
pb.NewServerAddressFromDataNode(target.location.dataNode),
grpcDialOption, fidList, false)
if deleteErr != nil {
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index 1d27fae1d..acb0ee5ad 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -7,6 +7,18 @@ import (
"errors"
"flag"
"fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
@@ -14,23 +26,12 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage"
- "github.com/seaweedfs/seaweedfs/weed/storage/idx"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"golang.org/x/sync/errgroup"
- "io"
- "math"
- "net/http"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
)
func init() {
@@ -163,7 +164,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
delete(volumeIdToVInfo, volumeId)
continue
}
- err = c.collectOneVolumeFileIds(dataNodeId, volumeId, vinfo, uint64(collectModifyFromAtNs), uint64(collectCutoffFromAtNs))
+ err = c.collectOneVolumeFileIds(dataNodeId, volumeId, vinfo)
if err != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
}
@@ -198,7 +199,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
return fmt.Errorf("failed to collect file ids from filer: %v", err)
}
// volume file ids subtract filer file ids
- if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, *applyPurging); err != nil {
+ if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, *applyPurging, uint64(collectModifyFromAtNs), uint64(collectCutoffFromAtNs)); err != nil {
return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
}
}
@@ -288,7 +289,7 @@ func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInf
return nil
}
-func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, applyPurging bool) error {
+func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, applyPurging bool, modifyFrom, cutoffFrom uint64) error {
var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
volumeIdOrphanFileIds := make(map[uint32]map[string]bool)
@@ -298,7 +299,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVIn
serverReplicas := make(map[uint32][]pb.ServerAddress)
for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
for volumeId, vinfo := range volumeIdToVInfo {
- inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(dataNodeId, volumeId, &vinfo)
+ inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(dataNodeId, volumeId, &vinfo, modifyFrom, cutoffFrom)
if checkErr != nil {
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
}
@@ -394,7 +395,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVIn
return nil
}
-func (c *commandVolumeFsck) collectOneVolumeFileIds(dataNodeId string, volumeId uint32, vinfo VInfo, modifyFrom uint64, cutoffFrom uint64) error {
+func (c *commandVolumeFsck) collectOneVolumeFileIds(dataNodeId string, volumeId uint32, vinfo VInfo) error {
if *c.verbose {
fmt.Fprintf(c.writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
@@ -431,29 +432,6 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(dataNodeId string, volumeId
}
buf.Write(resp.FileContent)
}
- if !vinfo.isReadOnly && (modifyFrom != 0 || cutoffFrom != 0) {
- index, err := idx.FirstInvalidIndex(buf.Bytes(),
- func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
- resp, err := volumeServerClient.ReadNeedleMeta(context.Background(), &volume_server_pb.ReadNeedleMetaRequest{
- VolumeId: volumeId,
- NeedleId: uint64(key),
- Offset: offset.ToActualOffset(),
- Size: int32(size),
- })
- if err != nil {
- return false, fmt.Errorf("read needle meta with id %d from volume %d: %v", key, volumeId, err)
- }
- if (modifyFrom == 0 || modifyFrom <= resp.AppendAtNs) && (cutoffFrom == 0 || resp.AppendAtNs <= cutoffFrom) {
- return true, nil
- }
- return false, nil
- })
- if err != nil {
- fmt.Fprintf(c.writer, "Failed to search for last valid index on volume %d with error %v\n", volumeId, err)
- } else {
- buf.Truncate(index * types.NeedleMapEntrySize)
- }
- }
idxFilename := getVolumeFileIdFile(c.tempFolder, dataNodeId, volumeId)
err = writeToFile(buf.Bytes(), idxFilename)
if err != nil {
@@ -552,9 +530,7 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath) {
fmt.Fprintf(c.writer, "HTTP delete request error: %v\n", err)
}
- client := &http.Client{}
-
- resp, err := client.Do(req)
+ resp, err := util_http.GetGlobalHttpClient().Do(req)
if err != nil {
fmt.Fprintf(c.writer, "DELETE fetch error: %v\n", err)
}
@@ -571,7 +547,7 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath) {
}
}
-func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(dataNodeId string, volumeId uint32, vinfo *VInfo) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
+func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(dataNodeId string, volumeId uint32, vinfo *VInfo, modifyFrom, cutoffFrom uint64) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
volumeFileIdDb := needle_map.NewMemDb()
defer volumeFileIdDb.Close()
@@ -611,9 +587,30 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(dataNodeId stri
if n.Size.IsDeleted() {
return nil
}
- orphanFileIds = append(orphanFileIds, n.Key.FileId(volumeId))
- orphanFileCount++
- orphanDataSize += uint64(n.Size)
+ if cutoffFrom > 0 || modifyFrom > 0 {
+ return operation.WithVolumeServerClient(false, vinfo.server, c.env.option.GrpcDialOption,
+ func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, err := volumeServerClient.ReadNeedleMeta(context.Background(), &volume_server_pb.ReadNeedleMetaRequest{
+ VolumeId: volumeId,
+ NeedleId: types.NeedleIdToUint64(n.Key),
+ Offset: n.Offset.ToActualOffset(),
+ Size: int32(n.Size),
+ })
+ if err != nil {
+ return fmt.Errorf("read needle meta with id %d from volume %d: %v", n.Key, volumeId, err)
+ }
+ if (modifyFrom == 0 || modifyFrom <= resp.AppendAtNs) && (cutoffFrom == 0 || resp.AppendAtNs <= cutoffFrom) {
+ orphanFileIds = append(orphanFileIds, n.Key.FileId(volumeId))
+ orphanFileCount++
+ orphanDataSize += uint64(n.Size)
+ }
+ return nil
+ })
+ } else {
+ orphanFileIds = append(orphanFileIds, n.Key.FileId(volumeId))
+ orphanFileCount++
+ orphanDataSize += uint64(n.Size)
+ }
return nil
}); err != nil {
err = fmt.Errorf("failed to AscendingVisit %+v", err)
@@ -697,7 +694,7 @@ func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []
go func(server pb.ServerAddress, fidList []string) {
defer wg.Done()
- if deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil {
+ if deleteResults, deleteErr := operation.DeleteFileIdsAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil {
err = deleteErr
} else if deleteResults != nil {
resultChan <- deleteResults
diff --git a/weed/shell/command_volume_grow.go b/weed/shell/command_volume_grow.go
new file mode 100644
index 000000000..21d98dddd
--- /dev/null
+++ b/weed/shell/command_volume_grow.go
@@ -0,0 +1,64 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandGrow{})
+}
+
+type commandGrow struct {
+}
+
+func (c *commandGrow) Name() string {
+ return "volume.grow"
+}
+
+func (c *commandGrow) Help() string {
+ return `grow volumes
+
+	volume.grow [-count=<number of volumes>] [-collection=<collection name>] [-dataCenter=<data center name>]
+
+`
+}
+
+func (c *commandGrow) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	volumeGrowCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	growCount := volumeGrowCommand.Uint("count", 2, "number of writable volumes to grow")
+	collection := volumeGrowCommand.String("collection", "", "grow this collection")
+	dataCenter := volumeGrowCommand.String("dataCenter", "", "grow volumes only from the specified data center")
+
+	if err = volumeGrowCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ assignRequest := &master_pb.AssignRequest{
+ Count: 0,
+ Collection: *collection,
+ WritableVolumeCount: uint32(*growCount),
+ }
+ if *dataCenter != "" {
+ assignRequest.DataCenter = *dataCenter
+ }
+
+	return commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
+		if _, err := client.Assign(context.Background(), assignRequest); err != nil {
+			return fmt.Errorf("Assign: %v", err)
+		}
+		return nil
+	})
+}
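Usage note: run inside `weed shell`, for example `volume.grow -collection=pics -dataCenter=dc1 -count=4` (flag values here are illustrative); the command issues an Assign request whose WritableVolumeCount asks the master to pre-grow that many writable volumes for the collection.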
diff --git a/weed/shell/command_volume_server_evacuate.go b/weed/shell/command_volume_server_evacuate.go
index 57eb6fc45..bad695cd7 100644
--- a/weed/shell/command_volume_server_evacuate.go
+++ b/weed/shell/command_volume_server_evacuate.go
@@ -3,14 +3,15 @@ package shell
import (
"flag"
"fmt"
+ "io"
+ "os"
+
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"golang.org/x/exp/slices"
- "io"
- "os"
)
func init() {
@@ -219,7 +220,7 @@ func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][
})
for i := 0; i < len(otherNodes); i++ {
emptyNode := otherNodes[i]
- if freeVolumeCountfn(emptyNode.info) < 0 {
+ if freeVolumeCountfn(emptyNode.info) <= 0 {
continue
}
hasMoved, err = maybeMoveOneVolume(commandEnv, volumeReplicas, thisNode, vol, emptyNode, applyChange)
diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go
index 6932317ab..cb805b0cf 100644
--- a/weed/shell/command_volume_tier_upload.go
+++ b/weed/shell/command_volume_tier_upload.go
@@ -113,11 +113,14 @@ func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection str
return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, existingLocations[0].Url, dest, err)
}
+ if keepLocalDatFile {
+ return nil
+ }
// now the first replica has the .idx and .vif files.
// ask replicas on other volume server to delete its own local copy
for i, location := range existingLocations {
if i == 0 {
- break
+ continue
}
fmt.Printf("delete volume %d from %s\n", vid, location.Url)
err = deleteVolume(commandEnv.option.GrpcDialOption, vid, location.ServerAddress(), false)
diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go
index 4393c280c..134485946 100644
--- a/weed/stats/metrics.go
+++ b/weed/stats/metrics.go
@@ -76,7 +76,15 @@ var (
Subsystem: "master",
Name: "volume_layout_total",
Help: "Number of volumes in volume layouts",
- }, []string{"collection", "replica", "type"})
+ }, []string{"collection", "dataCenter", "type"})
+
+ MasterPickForWriteErrorCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: Namespace,
+ Subsystem: "master",
+ Name: "pick_for_write_error",
+ Help: "Counter of master pick for write error",
+ })
MasterLeaderChangeCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index b7fa82197..dd78735d2 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -60,6 +60,7 @@ func GenerateDirUuid(dir string) (dirUuidString string, err error) {
}
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
+ glog.V(4).Infof("Added new Disk %s: maxVolumes=%d", dir, maxVolumeCount)
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
@@ -138,7 +139,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne
// skip if ec volumes exists
if skipIfEcVolumesExists {
- if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
+ if util.FileExists(l.IdxDirectory + "/" + volumeName + ".ecx") {
return false
}
}
@@ -417,7 +418,6 @@ func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
}
func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
-
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
@@ -426,7 +426,11 @@ func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64)
continue
}
datSize, idxSize, _ := vol.FileStat()
- unUsedSpace += volumeSizeLimit - (datSize + idxSize)
+ unUsedSpaceVolume := int64(volumeSizeLimit) - int64(datSize+idxSize)
+ glog.V(4).Infof("Volume stats for %d: volumeSizeLimit=%d, datSize=%d idxSize=%d unused=%d", vol.Id, volumeSizeLimit, datSize, idxSize, unUsedSpaceVolume)
+ if unUsedSpaceVolume >= 0 {
+ unUsedSpace += uint64(unUsedSpaceVolume)
+ }
}
return
diff --git a/weed/storage/disk_location_test.go b/weed/storage/disk_location_test.go
new file mode 100644
index 000000000..d105a477f
--- /dev/null
+++ b/weed/storage/disk_location_test.go
@@ -0,0 +1,78 @@
+package storage
+
+import (
+ "testing"
+ "time"
+
+ "github.com/seaweedfs/seaweedfs/weed/storage/backend"
+ "github.com/seaweedfs/seaweedfs/weed/storage/needle"
+ "github.com/seaweedfs/seaweedfs/weed/util"
+)
+
+type (
+ mockBackendStorageFile struct {
+ backend.DiskFile
+
+ datSize int64
+ }
+)
+
+func (df *mockBackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {
+ return df.datSize, time.Now(), nil
+}
+
+type (
+ mockNeedleMapper struct {
+ NeedleMap
+
+ idxSize uint64
+ }
+)
+
+func (nm *mockNeedleMapper) IndexFileSize() (idxSize uint64) {
+ return nm.idxSize
+}
+
+func TestUnUsedSpace(t *testing.T) {
+ minFreeSpace := util.MinFreeSpace{Type: util.AsPercent, Percent: 1, Raw: "1"}
+
+ diskLocation := DiskLocation{
+ Directory: "/test/",
+ DirectoryUuid: "1234",
+ IdxDirectory: "/test/",
+ DiskType: "hdd",
+ MaxVolumeCount: 0,
+ OriginalMaxVolumeCount: 0,
+ MinFreeSpace: minFreeSpace,
+ }
+ diskLocation.volumes = make(map[needle.VolumeId]*Volume)
+
+ volumes := [3]*Volume{
+ {dir: diskLocation.Directory, dirIdx: diskLocation.IdxDirectory, Collection: "", Id: 0, DataBackend: &mockBackendStorageFile{datSize: 990}, nm: &mockNeedleMapper{idxSize: 10}},
+ {dir: diskLocation.Directory, dirIdx: diskLocation.IdxDirectory, Collection: "", Id: 1, DataBackend: &mockBackendStorageFile{datSize: 990}, nm: &mockNeedleMapper{idxSize: 10}},
+ {dir: diskLocation.Directory, dirIdx: diskLocation.IdxDirectory, Collection: "", Id: 2, DataBackend: &mockBackendStorageFile{datSize: 990}, nm: &mockNeedleMapper{idxSize: 10}},
+ }
+
+ for i, vol := range volumes {
+ diskLocation.SetVolume(needle.VolumeId(i), vol)
+ }
+
+ // Testing when there's still space
+ unUsedSpace := diskLocation.UnUsedSpace(1200)
+ if unUsedSpace != 600 {
+ t.Errorf("unUsedSpace incorrect: %d != %d", unUsedSpace, 1500)
+ }
+
+ // Testing when there's exactly 0 space
+ unUsedSpace = diskLocation.UnUsedSpace(1000)
+ if unUsedSpace != 0 {
+ t.Errorf("unUsedSpace incorrect: %d != %d", unUsedSpace, 0)
+ }
+
+ // Testing when there's negative free space
+ unUsedSpace = diskLocation.UnUsedSpace(900)
+ if unUsedSpace != 0 {
+ t.Errorf("unUsedSpace incorrect: %d != %d", unUsedSpace, 0)
+ }
+
+}
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
index b512c16a2..28de27910 100644
--- a/weed/storage/needle/needle_parse_upload.go
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -43,7 +43,7 @@ func ParseUpload(r *http.Request, sizeLimit int64, bytesBuffer *bytes.Buffer) (p
}
}
- if r.Method == "POST" {
+ if r.Method == http.MethodPost {
contentType := r.Header.Get("Content-Type")
// If content-type is explicitly set, upload the file without parsing form-data
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 5e48056f7..715b5ed80 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -135,12 +135,12 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
}
func (dn *DataNode) AdjustMaxVolumeCounts(maxVolumeCounts map[string]uint32) {
- deltaDiskUsages := newDiskUsages()
for diskType, maxVolumeCount := range maxVolumeCounts {
if maxVolumeCount == 0 {
// the volume server may have set the max to zero
continue
}
+ deltaDiskUsages := newDiskUsages()
dt := types.ToDiskType(diskType)
currentDiskUsage := dn.diskUsages.getOrCreateDisk(dt)
currentDiskUsageMaxVolumeCount := atomic.LoadInt64(&currentDiskUsage.maxVolumeCount)
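Moving `newDiskUsages()` inside the loop gives each disk type a fresh delta; a single accumulator reused across iterations would fold earlier disks' adjustments into later ones. A generic illustration of that pitfall, not the SeaweedFS types:

```go
package main

import "fmt"

func main() {
	maxCounts := map[string]int64{"hdd": 100, "ssd": 40}

	// Buggy shape: one shared delta accumulates across iterations,
	// so each application re-applies every earlier disk's adjustment.
	sharedDelta := int64(0)
	buggyTotal := int64(0)
	for _, c := range maxCounts {
		sharedDelta += c
		buggyTotal += sharedDelta
	}

	// Fixed shape, as in the diff: a fresh delta per disk type,
	// so each adjustment is applied exactly once.
	total := int64(0)
	for _, c := range maxCounts {
		delta := c
		total += delta
	}

	fmt.Println(buggyTotal > total) // true: the shared delta over-counts
}
```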
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index 82c2db79c..b4a7d649c 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -20,6 +20,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/buffer_pool"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request, contentMd5 string) (isUnchanged bool, err error) {
@@ -105,7 +106,12 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt
BytesBuffer: bytesBuffer,
}
- _, err := operation.UploadData(n.Data, uploadOption)
+ uploader, err := operation.NewUploader()
+ if err != nil {
+ glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
+ return err
+ }
+ _, err = uploader.UploadData(n.Data, uploadOption)
if err != nil {
glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
}
@@ -144,7 +150,7 @@ func ReplicatedDelete(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOp
if len(remoteLocations) > 0 { //send to other replica locations
if err = DistributedOperation(remoteLocations, func(location operation.Location) error {
- return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt))
+ return util_http.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt))
}); err != nil {
size = 0
}
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index 0a4cb4050..6a149bd56 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -50,8 +50,11 @@ type Topology struct {
RaftServer raft.Server
RaftServerAccessLock sync.RWMutex
HashicorpRaft *hashicorpRaft.Raft
- UuidAccessLock sync.RWMutex
- UuidMap map[string][]string
+ barrierLock sync.Mutex
+ barrierDone bool
+
+ UuidAccessLock sync.RWMutex
+ UuidMap map[string][]string
}
func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology {
@@ -120,6 +123,42 @@ func (t *Topology) IsLeader() bool {
return false
}
+func (t *Topology) IsLeaderAndCanRead() bool {
+ if t.RaftServer != nil {
+ return t.IsLeader()
+ } else if t.HashicorpRaft != nil {
+ return t.IsLeader() && t.DoBarrier()
+ } else {
+ return false
+ }
+}
+
+func (t *Topology) DoBarrier() bool {
+ t.barrierLock.Lock()
+ defer t.barrierLock.Unlock()
+ if t.barrierDone {
+ return true
+ }
+
+ glog.V(0).Infof("raft do barrier")
+ barrier := t.HashicorpRaft.Barrier(2 * time.Minute)
+ if err := barrier.Error(); err != nil {
+ glog.Errorf("failed to wait for barrier, error %s", err)
+		return false
+	}
+
+ t.barrierDone = true
+ glog.V(0).Infof("raft do barrier success")
+ return true
+}
+
+func (t *Topology) BarrierReset() {
+ t.barrierLock.Lock()
+ defer t.barrierLock.Unlock()
+ t.barrierDone = false
+}
+
func (t *Topology) Leader() (l pb.ServerAddress, err error) {
exponentialBackoff := backoff.NewExponentialBackOff()
exponentialBackoff.InitialInterval = 100 * time.Millisecond
@@ -180,6 +219,10 @@ func (t *Topology) Lookup(collection string, vid needle.VolumeId) (dataNodes []*
}
func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
+ if !t.IsLeaderAndCanRead() {
+ return 0, fmt.Errorf("as leader can not read yet")
+
+ }
vid := t.GetMaxVolumeId()
next := vid.Next()
@@ -208,8 +251,8 @@ func (t *Topology) PickForWrite(requestedCount uint64, option *VolumeGrowOption,
if err != nil {
return "", 0, nil, shouldGrow, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
}
- if volumeLocationList.Length() == 0 {
- return "", 0, nil, shouldGrow, fmt.Errorf("no writable volumes available for collection:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
+ if volumeLocationList == nil || volumeLocationList.Length() == 0 {
+ return "", 0, nil, shouldGrow, fmt.Errorf("%s available for collection:%s replication:%s ttl:%s", noWritableVolumes, option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
}
nextFileId := t.Sequence.NextFileId(requestedCount)
fileId = needle.NewFileId(vid, nextFileId, rand.Uint32()).String()
@@ -285,6 +328,22 @@ func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
}
}
+func (t *Topology) DataCenterExists(dcName string) bool {
+ return dcName == "" || t.GetOrCreateDataCenter(dcName) != nil
+}
+
+func (t *Topology) GetDataCenter(dcName string) (dc *DataCenter) {
+ t.RLock()
+ defer t.RUnlock()
+ for _, c := range t.children {
+ dc = c.(*DataCenter)
+ if string(dc.Id()) == dcName {
+ return dc
+ }
+ }
+	return nil
+}
+
func (t *Topology) GetOrCreateDataCenter(dcName string) *DataCenter {
t.Lock()
defer t.Unlock()
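The DoBarrier/BarrierReset pair makes the expensive raft barrier run once per term of leadership, with loss of leadership re-arming it. A sketch of that gate in isolation; `runBarrier` stands in for raft's `Barrier()` and the names are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// barrierGate runs an expensive barrier once and caches success; Reset
// re-arms it so the next leadership term waits for the barrier again.
type barrierGate struct {
	mu   sync.Mutex
	done bool
}

func (b *barrierGate) Do(runBarrier func() error) bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.done {
		return true
	}
	if err := runBarrier(); err != nil {
		return false
	}
	b.done = true
	return true
}

func (b *barrierGate) Reset() {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.done = false
}

func main() {
	g := &barrierGate{}
	barrier := func() error { fmt.Println("waiting for barrier"); return nil }
	fmt.Println(g.Do(barrier)) // runs the barrier: true
	fmt.Println(g.Do(barrier)) // cached: true, no second wait
	g.Reset()                  // leadership lost: next term waits again
}
```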
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index 761649ff4..d0ecd089a 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -90,6 +90,11 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) {
vl.SetVolumeUnavailable(dn, v.Id)
}
+ // unregister ec shards when volume server disconnected
+ for _, s := range dn.GetEcShards() {
+ t.UnRegisterEcShards(s, dn)
+ }
+
negativeUsages := dn.GetDiskUsages().negative()
dn.UpAdjustDiskUsageDelta(negativeUsages)
dn.DeltaUpdateVolumes([]storage.VolumeInfo{}, dn.GetVolumes())
diff --git a/weed/topology/topology_info.go b/weed/topology/topology_info.go
index 120ae0d42..89f9097f6 100644
--- a/weed/topology/topology_info.go
+++ b/weed/topology/topology_info.go
@@ -42,6 +42,15 @@ func (t *Topology) ToInfo() (info TopologyInfo) {
return
}
+func (t *Topology) ListVolumeLayouts() (volumeLayouts []*VolumeLayout) {
+ for _, col := range t.collectionMap.Items() {
+ for _, volumeLayout := range col.(*Collection).storageType2VolumeLayout.Items() {
+ volumeLayouts = append(volumeLayouts, volumeLayout.(*VolumeLayout))
+ }
+ }
+ return volumeLayouts
+}
+
func (t *Topology) ToVolumeMap() interface{} {
m := make(map[string]interface{})
m["Max"] = t.diskUsages.GetMaxVolumeCount()
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index cfc31c8b1..ff516599d 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -27,14 +27,14 @@ This package is created to resolve these replica placement issues:
type VolumeGrowRequest struct {
Option *VolumeGrowOption
- Count int
+ Count uint32
}
type volumeGrowthStrategy struct {
- Copy1Count int
- Copy2Count int
- Copy3Count int
- CopyOtherCount int
+ Copy1Count uint32
+ Copy2Count uint32
+ Copy3Count uint32
+ CopyOtherCount uint32
Threshold float64
}
@@ -75,7 +75,7 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
// one replication type may need rp.GetCopyCount() actual volumes
// given copyCount, how many logical volumes to create
-func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
+func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count uint32) {
switch copyCount {
case 1:
count = VolumeGrowStrategy.Copy1Count
@@ -89,7 +89,7 @@ func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
return
}
-func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount int) (result []*master_pb.VolumeLocation, err error) {
+func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount uint32) (result []*master_pb.VolumeLocation, err error) {
if targetCount == 0 {
targetCount = vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount())
}
@@ -99,11 +99,11 @@ func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOp
}
return result, err
}
-func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (result []*master_pb.VolumeLocation, err error) {
+func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount uint32, option *VolumeGrowOption, topo *Topology) (result []*master_pb.VolumeLocation, err error) {
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
- for i := 0; i < targetCount; i++ {
+ for i := uint32(0); i < targetCount; i++ {
if res, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
result = append(result, res...)
} else {
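With the move from int to uint32 the count stays unsigned end to end; a hedged sketch of the updated call shape (vg, option, grpcDialOption and topo are assumed to be in scope):

// Passing 0 lets findVolumeCount derive the count from the replica placement.
result, err := vg.AutomaticGrowByType(option, grpcDialOption, topo, 0)
if err != nil {
	glog.Errorf("volume growth failed: %v", err)
}
glog.V(0).Infof("grew %d volume locations", len(result))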
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 66f7118c9..e2a360165 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -1,7 +1,6 @@
package topology
import (
- "errors"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/stats"
"math/rand"
@@ -28,9 +27,10 @@ const (
type volumeState string
const (
- readOnlyState volumeState = "ReadOnly"
- oversizedState = "Oversized"
- crowdedState = "Crowded"
+ readOnlyState volumeState = "ReadOnly"
+ oversizedState = "Oversized"
+ crowdedState = "Crowded"
+ noWritableVolumes = "No writable volumes"
)
type stateIndicator func(copyState) bool
@@ -106,7 +106,8 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
- growRequestCount int32
+ growRequest atomic.Bool
+ lastGrowCount atomic.Uint32
rp *super_block.ReplicaPlacement
ttl *needle.TTL
diskType types.DiskType
@@ -292,23 +293,15 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (vi
lenWriters := len(vl.writables)
if lenWriters <= 0 {
- //glog.V(0).Infoln("No more writable volumes!")
- shouldGrow = true
- return 0, 0, nil, shouldGrow, errors.New("No more writable volumes!")
+ return 0, 0, nil, true, fmt.Errorf("%s in volume layout", noWritableVolumes)
}
if option.DataCenter == "" && option.Rack == "" && option.DataNode == "" {
vid := vl.writables[rand.Intn(lenWriters)]
locationList = vl.vid2location[vid]
- if locationList != nil && locationList.Length() > 0 {
- // check whether picked file is close to full
- dn := locationList.Head()
- info, _ := dn.GetVolumesById(vid)
- if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
- shouldGrow = true
- }
- return vid, count, locationList.Copy(), shouldGrow, nil
+ if locationList == nil || len(locationList.list) == 0 {
+ return 0, 0, nil, false, fmt.Errorf("strangely vid %s is on no machine", vid.String())
}
- return 0, 0, nil, shouldGrow, errors.New("Strangely vid " + vid.String() + " is on no machine!")
+ return vid, count, locationList.Copy(), false, nil
}
// clone vl.writables
@@ -331,34 +324,38 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (vi
if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
continue
}
- vid, locationList = writableVolumeId, volumeLocationList.Copy()
- // check whether picked file is close to full
- info, _ := dn.GetVolumesById(writableVolumeId)
- if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
- shouldGrow = true
- }
- counter = count
+ vid, locationList, counter = writableVolumeId, volumeLocationList.Copy(), count
return
}
}
- return vid, count, locationList, true, fmt.Errorf("No writable volumes in DataCenter:%v Rack:%v DataNode:%v", option.DataCenter, option.Rack, option.DataNode)
+ return vid, count, locationList, true, fmt.Errorf("%s in DataCenter:%v Rack:%v DataNode:%v", noWritableVolumes, option.DataCenter, option.Rack, option.DataNode)
}
func (vl *VolumeLayout) HasGrowRequest() bool {
- return atomic.LoadInt32(&vl.growRequestCount) > 0
+ return vl.growRequest.Load()
}
func (vl *VolumeLayout) AddGrowRequest() {
- atomic.AddInt32(&vl.growRequestCount, 1)
+ vl.growRequest.Store(true)
}
func (vl *VolumeLayout) DoneGrowRequest() {
- atomic.AddInt32(&vl.growRequestCount, -1)
+ vl.growRequest.Store(false)
+}
+
+func (vl *VolumeLayout) SetLastGrowCount(count uint32) {
+ if vl.lastGrowCount.Load() != count {
+ vl.lastGrowCount.Store(count)
+ }
+}
+
+func (vl *VolumeLayout) GetLastGrowCount() uint32 {
+ return vl.lastGrowCount.Load()
}
func (vl *VolumeLayout) ShouldGrowVolumes(option *VolumeGrowOption) bool {
total, active, crowded := vl.GetActiveVolumeCount(option)
- stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.ReplicaPlacement.String(), "total").Set(float64(total))
- stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.ReplicaPlacement.String(), "active").Set(float64(active))
- stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.ReplicaPlacement.String(), "crowded").Set(float64(crowded))
+ stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "total").Set(float64(total))
+ stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "active").Set(float64(active))
+ stats.MasterVolumeLayout.WithLabelValues(option.Collection, option.DataCenter, "crowded").Set(float64(crowded))
//glog.V(0).Infof("active volume: %d, high usage volume: %d\n", active, high)
return active <= crowded
}
@@ -539,6 +536,14 @@ func (vl *VolumeLayout) ToInfo() (info VolumeLayoutInfo) {
return
}
+func (vl *VolumeLayout) ToGrowOption() (option *VolumeGrowOption) {
+ option = &VolumeGrowOption{}
+ option.ReplicaPlacement = vl.rp
+ option.Ttl = vl.ttl
+ option.DiskType = vl.diskType
+ return
+}
+
func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
vl.accessLock.RLock()
defer vl.accessLock.RUnlock()
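Replacing the int32 counter with atomic.Bool makes grow requests single-flight: duplicate AddGrowRequest calls collapse into one pending flag. A sketch of the intended pattern (vl assumed to be a *VolumeLayout):

if !vl.HasGrowRequest() && vl.ShouldGrowVolumes(vl.ToGrowOption()) {
	vl.AddGrowRequest() // boolean flag: concurrent callers see it and skip
	go func() {
		defer vl.DoneGrowRequest()
		// ... submit the actual growth work here ...
	}()
}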
diff --git a/weed/util/bytes.go b/weed/util/bytes.go
index 26da91033..482dc3a48 100644
--- a/weed/util/bytes.go
+++ b/weed/util/bytes.go
@@ -148,6 +148,12 @@ func RandomInt32() int32 {
return int32(BytesToUint32(buf))
}
+func RandomUint64() uint64 {
+ buf := make([]byte, 8)
+ rand.Read(buf)
+ return BytesToUint64(buf)
+}
+
func RandomBytes(byteCount int) []byte {
buf := make([]byte, byteCount)
rand.Read(buf)
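With the corrected signature above, the helper yields a full 64-bit value, for example:

// Draw a random 64-bit identifier.
id := util.RandomUint64()
fmt.Printf("id: %016x\n", id)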
diff --git a/weed/util/config.go b/weed/util/config.go
index a242742fc..7ac765ff5 100644
--- a/weed/util/config.go
+++ b/weed/util/config.go
@@ -11,6 +11,7 @@ import (
var (
ConfigurationFileDirectory DirectoryValueType
+ loadSecurityConfigOnce sync.Once
)
type DirectoryValueType string
@@ -31,6 +32,12 @@ type Configuration interface {
SetDefault(key string, value interface{})
}
+func LoadSecurityConfiguration() {
+ loadSecurityConfigOnce.Do(func() {
+ LoadConfiguration("security", false)
+ })
+}
+
func LoadConfiguration(configFileName string, required bool) (loaded bool) {
// find a filer store
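Guarding the load with sync.Once makes the call idempotent, so scattered call sites (such as the new http client below) can invoke it defensively:

util.LoadSecurityConfiguration() // first call parses security.toml
util.LoadSecurityConfiguration() // subsequent calls are no-ops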
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 09f813e5b..7911d03f6 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -7,7 +7,9 @@ import (
const HttpStatusCancelled = 499
var (
- VERSION_NUMBER = fmt.Sprintf("%.02f", 3.68)
+ MAJOR_VERSION = int32(3)
+ MINOR_VERSION = int32(71)
+ VERSION_NUMBER = fmt.Sprintf("%d.%02d", MAJOR_VERSION, MINOR_VERSION)
VERSION = sizeLimit + " " + VERSION_NUMBER
COMMIT = ""
)
diff --git a/weed/util/http/client/http_client.go b/weed/util/http/client/http_client.go
new file mode 100644
index 000000000..d1d2f5c56
--- /dev/null
+++ b/weed/util/http/client/http_client.go
@@ -0,0 +1,201 @@
+package client
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ util "github.com/seaweedfs/seaweedfs/weed/util"
+ "github.com/spf13/viper"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ loadSecurityConfigOnce sync.Once
+)
+
+type HTTPClient struct {
+ Client *http.Client
+ Transport *http.Transport
+ expectHttpsScheme bool
+}
+
+func (httpClient *HTTPClient) Do(req *http.Request) (*http.Response, error) {
+ req.URL.Scheme = httpClient.GetHttpScheme()
+ return httpClient.Client.Do(req)
+}
+
+func (httpClient *HTTPClient) Get(url string) (resp *http.Response, err error) {
+ url, err = httpClient.NormalizeHttpScheme(url)
+ if err != nil {
+ return nil, err
+ }
+ return httpClient.Client.Get(url)
+}
+
+func (httpClient *HTTPClient) Post(url, contentType string, body io.Reader) (resp *http.Response, err error) {
+ url, err = httpClient.NormalizeHttpScheme(url)
+ if err != nil {
+ return nil, err
+ }
+ return httpClient.Client.Post(url, contentType, body)
+}
+
+func (httpClient *HTTPClient) PostForm(url string, data url.Values) (resp *http.Response, err error) {
+ url, err = httpClient.NormalizeHttpScheme(url)
+ if err != nil {
+ return nil, err
+ }
+ return httpClient.Client.PostForm(url, data)
+}
+
+func (httpClient *HTTPClient) Head(url string) (resp *http.Response, err error) {
+ url, err = httpClient.NormalizeHttpScheme(url)
+ if err != nil {
+ return nil, err
+ }
+ return httpClient.Client.Head(url)
+}
+func (httpClient *HTTPClient) CloseIdleConnections() {
+ httpClient.Client.CloseIdleConnections()
+}
+
+func (httpClient *HTTPClient) GetClientTransport() *http.Transport {
+ return httpClient.Transport
+}
+
+func (httpClient *HTTPClient) GetHttpScheme() string {
+ if httpClient.expectHttpsScheme {
+ return "https"
+ }
+ return "http"
+}
+
+func (httpClient *HTTPClient) NormalizeHttpScheme(rawURL string) (string, error) {
+ expectedScheme := httpClient.GetHttpScheme()
+
+ if !(strings.HasPrefix(rawURL, "http://") || strings.HasPrefix(rawURL, "https://")) {
+ return expectedScheme + "://" + rawURL, nil
+ }
+
+ parsedURL, err := url.Parse(rawURL)
+ if err != nil {
+ return "", err
+ }
+
+ if expectedScheme != parsedURL.Scheme {
+ parsedURL.Scheme = expectedScheme
+ }
+ return parsedURL.String(), nil
+}
+
+func NewHttpClient(clientName ClientName, opts ...HttpClientOpt) (*HTTPClient, error) {
+ httpClient := HTTPClient{}
+ httpClient.expectHttpsScheme = checkIsHttpsClientEnabled(clientName)
+ var tlsConfig *tls.Config
+
+ if httpClient.expectHttpsScheme {
+ clientCertPair, err := getClientCertPair(clientName)
+ if err != nil {
+ return nil, err
+ }
+
+ clientCaCert, clientCaCertName, err := getClientCaCert(clientName)
+ if err != nil {
+ return nil, err
+ }
+
+ if clientCertPair != nil || len(clientCaCert) != 0 {
+ caCertPool, err := createHTTPClientCertPool(clientCaCert, clientCaCertName)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConfig = &tls.Config{
+ Certificates: []tls.Certificate{},
+ RootCAs: caCertPool,
+ InsecureSkipVerify: false,
+ }
+
+ if clientCertPair != nil {
+ tlsConfig.Certificates = append(tlsConfig.Certificates, *clientCertPair)
+ }
+ }
+ }
+
+ httpClient.Transport = &http.Transport{
+ MaxIdleConns: 1024,
+ MaxIdleConnsPerHost: 1024,
+ TLSClientConfig: tlsConfig,
+ }
+ httpClient.Client = &http.Client{
+ Transport: httpClient.Transport,
+ }
+
+ for _, opt := range opts {
+ opt(&httpClient)
+ }
+ return &httpClient, nil
+}
+
+func getStringOptionFromSecurityConfiguration(clientName ClientName, stringOptionName string) string {
+ util.LoadSecurityConfiguration()
+ return viper.GetString(fmt.Sprintf("https.%s.%s", clientName.LowerCaseString(), stringOptionName))
+}
+
+func getBoolOptionFromSecurityConfiguration(clientName ClientName, boolOptionName string) bool {
+ util.LoadSecurityConfiguration()
+ return viper.GetBool(fmt.Sprintf("https.%s.%s", clientName.LowerCaseString(), boolOptionName))
+}
+
+func checkIsHttpsClientEnabled(clientName ClientName) bool {
+ return getBoolOptionFromSecurityConfiguration(clientName, "enabled")
+}
+
+func getFileContentFromSecurityConfiguration(clientName ClientName, fileType string) ([]byte, string, error) {
+ if fileName := getStringOptionFromSecurityConfiguration(clientName, fileType); fileName != "" {
+ fileContent, err := os.ReadFile(fileName)
+ if err != nil {
+ return nil, fileName, err
+ }
+ return fileContent, fileName, err
+ }
+ return nil, "", nil
+}
+
+func getClientCertPair(clientName ClientName) (*tls.Certificate, error) {
+ certFileName := getStringOptionFromSecurityConfiguration(clientName, "cert")
+ keyFileName := getStringOptionFromSecurityConfiguration(clientName, "key")
+ if certFileName == "" && keyFileName == "" {
+ return nil, nil
+ }
+ if certFileName != "" && keyFileName != "" {
+ clientCert, err := tls.LoadX509KeyPair(certFileName, keyFileName)
+ if err != nil {
+ return nil, fmt.Errorf("error loading client certificate and key: %s", err)
+ }
+ return &clientCert, nil
+ }
+ return nil, fmt.Errorf("error loading key pair: key `%s` and certificate `%s`", keyFileName, certFileName)
+}
+
+func getClientCaCert(clientName ClientName) ([]byte, string, error) {
+ return getFileContentFromSecurityConfiguration(clientName, "ca")
+}
+
+func createHTTPClientCertPool(certContent []byte, fileName string) (*x509.CertPool, error) {
+ certPool := x509.NewCertPool()
+ if len(certContent) == 0 {
+ return certPool, nil
+ }
+
+ ok := certPool.AppendCertsFromPEM(certContent)
+ if !ok {
+ return nil, fmt.Errorf("error processing certificate in %s", fileName)
+ }
+ return certPool, nil
+}
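A hedged sketch of constructing and using the client; the master address is a placeholder, and TLS is only engaged when the [https.client] section of security.toml enables it:

httpClient, err := client.NewHttpClient(client.Client, client.AddDialContext)
if err != nil {
	glog.Fatalf("create http client: %v", err)
}
// Scheme-less URLs are normalized to http:// or https:// as configured.
resp, err := httpClient.Get("localhost:9333/cluster/status")
if err == nil {
	defer resp.Body.Close()
}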
diff --git a/weed/util/http/client/http_client_interface.go b/weed/util/http/client/http_client_interface.go
new file mode 100644
index 000000000..7a2d43360
--- /dev/null
+++ b/weed/util/http/client/http_client_interface.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+type HTTPClientInterface interface {
+ Do(req *http.Request) (*http.Response, error)
+ Get(url string) (resp *http.Response, err error)
+ Post(url, contentType string, body io.Reader) (resp *http.Response, err error)
+ PostForm(url string, data url.Values) (resp *http.Response, err error)
+ Head(url string) (resp *http.Response, err error)
+ CloseIdleConnections()
+}
diff --git a/weed/util/http/client/http_client_name.go b/weed/util/http/client/http_client_name.go
new file mode 100644
index 000000000..aedaebbc6
--- /dev/null
+++ b/weed/util/http/client/http_client_name.go
@@ -0,0 +1,14 @@
+package client
+
+import "strings"
+
+type ClientName int
+
+//go:generate stringer -type=ClientName -output=http_client_name_string.go
+const (
+ Client ClientName = iota
+)
+
+func (name *ClientName) LowerCaseString() string {
+ return strings.ToLower(name.String())
+}
diff --git a/weed/util/http/client/http_client_name_string.go b/weed/util/http/client/http_client_name_string.go
new file mode 100644
index 000000000..652fcdaac
--- /dev/null
+++ b/weed/util/http/client/http_client_name_string.go
@@ -0,0 +1,23 @@
+// Code generated by "stringer -type=ClientName -output=http_client_name_string.go"; DO NOT EDIT.
+
+package client
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Client-0]
+}
+
+const _ClientName_name = "Client"
+
+var _ClientName_index = [...]uint8{0, 6}
+
+func (i ClientName) String() string {
+ if i < 0 || i >= ClientName(len(_ClientName_index)-1) {
+ return "ClientName(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ClientName_name[_ClientName_index[i]:_ClientName_index[i+1]]
+}
diff --git a/weed/util/http/client/http_client_opt.go b/weed/util/http/client/http_client_opt.go
new file mode 100644
index 000000000..1ff9d533d
--- /dev/null
+++ b/weed/util/http/client/http_client_opt.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net"
+ "time"
+)
+
+type HttpClientOpt = func(clientCfg *HTTPClient)
+
+func AddDialContext(httpClient *HTTPClient) {
+ dialContext := (&net.Dialer{
+ Timeout: 10 * time.Second,
+ KeepAlive: 10 * time.Second,
+ }).DialContext
+
+ httpClient.Transport.DialContext = dialContext
+ httpClient.Client.Transport = httpClient.Transport
+}
diff --git a/weed/util/http/http_global_client_init.go b/weed/util/http/http_global_client_init.go
new file mode 100644
index 000000000..0dcb05cfd
--- /dev/null
+++ b/weed/util/http/http_global_client_init.go
@@ -0,0 +1,27 @@
+package http
+
+import (
+ "github.com/seaweedfs/seaweedfs/weed/glog"
+ util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
+)
+
+var (
+ globalHttpClient *util_http_client.HTTPClient
+)
+
+func NewGlobalHttpClient(opt ...util_http_client.HttpClientOpt) (*util_http_client.HTTPClient, error) {
+ return util_http_client.NewHttpClient(util_http_client.Client, opt...)
+}
+
+func GetGlobalHttpClient() *util_http_client.HTTPClient {
+ return globalHttpClient
+}
+
+func InitGlobalHttpClient() {
+ var err error
+
+ globalHttpClient, err = NewGlobalHttpClient()
+ if err != nil {
+ glog.Fatalf("error init global http client: %v", err)
+ }
+}
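Initialization is deliberately explicit rather than hidden in an init() func, so programs embedding these packages must mirror what weed/weed.go does at startup; a self-contained sketch:

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)

func main() {
	util_http.InitGlobalHttpClient() // must run before any helper below
	body, _, err := util_http.Get("localhost:9333/dir/status")
	if err != nil {
		glog.Fatalf("get: %v", err)
	}
	fmt.Println(string(body))
}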
diff --git a/weed/util/http_util.go b/weed/util/http/http_global_client_util.go
index 6f6a17008..c3931a790 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http/http_global_client_util.go
@@ -1,4 +1,4 @@
-package util
+package http
import (
"compress/gzip"
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/mem"
+ "github.com/seaweedfs/seaweedfs/weed/util"
"io"
"net/http"
"net/url"
@@ -15,23 +16,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
)
-var (
- client *http.Client
- Transport *http.Transport
-)
-
-func init() {
- Transport = &http.Transport{
- MaxIdleConns: 1024,
- MaxIdleConnsPerHost: 1024,
- }
- client = &http.Client{
- Transport: Transport,
- }
-}
-
func Post(url string, values url.Values) ([]byte, error) {
- r, err := client.PostForm(url, values)
+ r, err := GetGlobalHttpClient().PostForm(url, values)
if err != nil {
return nil, err
}
@@ -57,14 +43,14 @@ func Get(url string) ([]byte, bool, error) {
}
func GetAuthenticated(url, jwt string) ([]byte, bool, error) {
- request, err := http.NewRequest("GET", url, nil)
+ request, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, true, err
}
maybeAddAuth(request, jwt)
request.Header.Add("Accept-Encoding", "gzip")
- response, err := client.Do(request)
+ response, err := GetGlobalHttpClient().Do(request)
if err != nil {
return nil, true, err
}
@@ -94,7 +80,7 @@ func GetAuthenticated(url, jwt string) ([]byte, bool, error) {
}
func Head(url string) (http.Header, error) {
- r, err := client.Head(url)
+ r, err := GetGlobalHttpClient().Head(url)
if err != nil {
return nil, err
}
@@ -112,12 +98,12 @@ func maybeAddAuth(req *http.Request, jwt string) {
}
func Delete(url string, jwt string) error {
- req, err := http.NewRequest("DELETE", url, nil)
+ req, err := http.NewRequest(http.MethodDelete, url, nil)
maybeAddAuth(req, jwt)
if err != nil {
return err
}
- resp, e := client.Do(req)
+ resp, e := GetGlobalHttpClient().Do(req)
if e != nil {
return e
}
@@ -140,12 +126,12 @@ func Delete(url string, jwt string) error {
}
func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err error) {
- req, err := http.NewRequest("DELETE", url, nil)
+ req, err := http.NewRequest(http.MethodDelete, url, nil)
maybeAddAuth(req, jwt)
if err != nil {
return
}
- resp, err := client.Do(req)
+ resp, err := GetGlobalHttpClient().Do(req)
if err != nil {
return
}
@@ -159,7 +145,7 @@ func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err err
}
func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error {
- r, err := client.PostForm(url, values)
+ r, err := GetGlobalHttpClient().PostForm(url, values)
if err != nil {
return err
}
@@ -182,7 +168,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB
}
func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) error {
- r, err := client.PostForm(url, values)
+ r, err := GetGlobalHttpClient().PostForm(url, values)
if err != nil {
return err
}
@@ -194,14 +180,14 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e
}
func DownloadFile(fileUrl string, jwt string) (filename string, header http.Header, resp *http.Response, e error) {
- req, err := http.NewRequest("GET", fileUrl, nil)
+ req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
if err != nil {
return "", nil, nil, err
}
maybeAddAuth(req, jwt)
- response, err := client.Do(req)
+ response, err := GetGlobalHttpClient().Do(req)
if err != nil {
return "", nil, nil, err
}
@@ -219,14 +205,11 @@ func DownloadFile(fileUrl string, jwt string) (filename string, header http.Head
}
func Do(req *http.Request) (resp *http.Response, err error) {
- return client.Do(req)
+ return GetGlobalHttpClient().Do(req)
}
-func NormalizeUrl(url string) string {
- if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
- return url
- }
- return "http://" + url
+func NormalizeUrl(url string) (string, error) {
+ return GetGlobalHttpClient().NormalizeHttpScheme(url)
}
func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) {
@@ -239,7 +222,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
return int64(n), err
}
- req, err := http.NewRequest("GET", fileUrl, nil)
+ req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
if err != nil {
return 0, err
}
@@ -249,7 +232,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
req.Header.Set("Accept-Encoding", "gzip")
}
- r, err := client.Do(req)
+ r, err := GetGlobalHttpClient().Do(req)
if err != nil {
return 0, err
}
@@ -310,7 +293,7 @@ func ReadUrlAsStreamAuthenticated(fileUrl, jwt string, cipherKey []byte, isConte
return readEncryptedUrl(fileUrl, jwt, cipherKey, isContentGzipped, isFullChunk, offset, size, fn)
}
- req, err := http.NewRequest("GET", fileUrl, nil)
+ req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
maybeAddAuth(req, jwt)
if err != nil {
return false, err
@@ -322,7 +305,7 @@ func ReadUrlAsStreamAuthenticated(fileUrl, jwt string, cipherKey []byte, isConte
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
}
- r, err := client.Do(req)
+ r, err := GetGlobalHttpClient().Do(req)
if err != nil {
return true, err
}
@@ -368,12 +351,12 @@ func readEncryptedUrl(fileUrl, jwt string, cipherKey []byte, isContentCompressed
if err != nil {
return retryable, fmt.Errorf("fetch %s: %v", fileUrl, err)
}
- decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey))
+ decryptedData, err := util.Decrypt(encryptedData, util.CipherKey(cipherKey))
if err != nil {
return false, fmt.Errorf("decrypt %s: %v", fileUrl, err)
}
if isContentCompressed {
- decryptedData, err = DecompressData(decryptedData)
+ decryptedData, err = util.DecompressData(decryptedData)
if err != nil {
glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err)
}
@@ -391,7 +374,7 @@ func readEncryptedUrl(fileUrl, jwt string, cipherKey []byte, isContentCompressed
func ReadUrlAsReaderCloser(fileUrl string, jwt string, rangeHeader string) (*http.Response, io.ReadCloser, error) {
- req, err := http.NewRequest("GET", fileUrl, nil)
+ req, err := http.NewRequest(http.MethodGet, fileUrl, nil)
if err != nil {
return nil, nil, err
}
@@ -403,7 +386,7 @@ func ReadUrlAsReaderCloser(fileUrl string, jwt string, rangeHeader string) (*htt
maybeAddAuth(req, jwt)
- r, err := client.Do(req)
+ r, err := GetGlobalHttpClient().Do(req)
if err != nil {
return nil, nil, err
}
@@ -463,7 +446,7 @@ func RetriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte,
var shouldRetry bool
- for waitTime := time.Second; waitTime < RetryWaitTime; waitTime += waitTime / 2 {
+ for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings {
n = 0
if strings.Contains(urlString, "%") {
@@ -494,4 +477,4 @@ func RetriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte,
return n, err
-}
+} \ No newline at end of file
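Because NormalizeUrl now consults the configured scheme and can fail on unparsable input, call sites gain an error path; a migration sketch (addr is a placeholder):

// before: urlStr := util.NormalizeUrl(addr)
urlStr, err := util_http.NormalizeUrl(addr)
if err != nil {
	return fmt.Errorf("normalize %s: %v", addr, err)
}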
diff --git a/weed/util/queue.go b/weed/util/queue.go
index 1e6211e0d..69efc078f 100644
--- a/weed/util/queue.go
+++ b/weed/util/queue.go
@@ -1,35 +1,37 @@
package util
-import "sync"
+import (
+ "sync"
+)
-type node struct {
- data interface{}
- next *node
+type node[T any] struct {
+ data T
+ next *node[T]
}
-type Queue struct {
- head *node
- tail *node
+type Queue[T any] struct {
+ head *node[T]
+ tail *node[T]
count int
sync.RWMutex
}
-func NewQueue() *Queue {
- q := &Queue{}
+func NewQueue[T any]() *Queue[T] {
+ q := &Queue[T]{}
return q
}
-func (q *Queue) Len() int {
+func (q *Queue[T]) Len() int {
q.RLock()
defer q.RUnlock()
return q.count
}
-func (q *Queue) Enqueue(item interface{}) {
+func (q *Queue[T]) Enqueue(item T) {
q.Lock()
defer q.Unlock()
- n := &node{data: item}
+ n := &node[T]{data: item}
if q.tail == nil {
q.tail = n
@@ -41,12 +43,12 @@ func (q *Queue) Enqueue(item interface{}) {
q.count++
}
-func (q *Queue) Dequeue() interface{} {
+func (q *Queue[T]) Dequeue() (result T) {
q.Lock()
defer q.Unlock()
if q.head == nil {
- return nil
+ return
}
n := q.head
@@ -59,3 +61,14 @@ func (q *Queue) Dequeue() interface{} {
return n.data
}
+
+func (q *Queue[T]) Peek() (result T) {
+ q.RLock()
+ defer q.RUnlock()
+
+ if q.head == nil {
+ return
+ }
+
+ return q.head.data
+}
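With the type parameter, Dequeue and Peek on an empty queue return the element type's zero value instead of a nil interface; for example:

q := util.NewQueue[string]()
q.Enqueue("a")
q.Enqueue("b")
fmt.Println(q.Peek())    // "a" (non-destructive)
fmt.Println(q.Dequeue()) // "a"
fmt.Println(q.Dequeue()) // "b"
fmt.Println(q.Dequeue()) // "" (zero value; queue is empty)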
diff --git a/weed/util/queue_test.go b/weed/util/queue_test.go
new file mode 100644
index 000000000..a79552893
--- /dev/null
+++ b/weed/util/queue_test.go
@@ -0,0 +1,22 @@
+package util
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestNewQueue(t *testing.T) {
+
+ q := NewQueue[int]()
+
+ for i := 0; i < 10; i++ {
+ q.Enqueue(i)
+ }
+
+ assert.Equal(t, 10, q.Len())
+
+ for i := 0; i < 10; i++ {
+ assert.Equal(t, i, q.Dequeue())
+ }
+
+}
diff --git a/weed/weed.go b/weed/weed.go
index a821cd72f..5139dd39c 100644
--- a/weed/weed.go
+++ b/weed/weed.go
@@ -20,6 +20,7 @@ import (
"github.com/getsentry/sentry-go"
"github.com/seaweedfs/seaweedfs/weed/command"
"github.com/seaweedfs/seaweedfs/weed/glog"
+ util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var IsDebug *bool
@@ -86,6 +87,7 @@ func main() {
return
}
+ util_http.InitGlobalHttpClient()
for _, cmd := range commands {
if cmd.Name() == args[0] && cmd.Run != nil {
cmd.Flag.Usage = func() { cmd.Usage() }