-rw-r--r--  unmaintained/change_superblock/change_superblock.go | 2
-rw-r--r--  unmaintained/fix_dat/fix_dat.go | 9
-rw-r--r--  unmaintained/see_idx/see_idx.go | 7
-rw-r--r--  weed/command/command.go | 1
-rw-r--r--  weed/command/export.go | 125
-rw-r--r--  weed/command/filer.go | 21
-rw-r--r--  weed/command/filer_copy.go | 12
-rw-r--r--  weed/command/fix.go | 5
-rw-r--r--  weed/command/mount.go | 45
-rw-r--r--  weed/command/mount_std.go | 34
-rw-r--r--  weed/command/s3.go | 76
-rw-r--r--  weed/command/server.go | 3
-rw-r--r--  weed/filer2/abstract_sql/abstract_sql_store.go | 4
-rw-r--r--  weed/filer2/cassandra/cassandra_store.go | 2
-rw-r--r--  weed/filer2/filer.go | 64
-rw-r--r--  weed/filer2/filer_master.go | 4
-rw-r--r--  weed/filer2/filerstore.go | 1
-rw-r--r--  weed/filer2/memdb/memdb_store.go | 2
-rw-r--r--  weed/filer2/memdb/memdb_store_test.go | 2
-rw-r--r--  weed/filesys/dir.go | 95
-rw-r--r--  weed/filesys/dir_rename.go | 2
-rw-r--r--  weed/filesys/dirty_page.go | 17
-rw-r--r--  weed/filesys/file.go | 23
-rw-r--r--  weed/filesys/filehandle.go | 9
-rw-r--r--  weed/filesys/wfs.go | 69
-rw-r--r--  weed/operation/assign_file_id.go | 82
-rw-r--r--  weed/operation/sync_volume.go | 11
-rw-r--r--  weed/pb/filer.proto | 16
-rw-r--r--  weed/pb/filer_pb/filer.pb.go | 244
-rw-r--r--  weed/s3api/AmazonS3.xsd | 692
-rw-r--r--  weed/s3api/README.txt | 7
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go | 177
-rw-r--r--  weed/s3api/s3api_errors.go | 100
-rw-r--r--  weed/s3api/s3api_handlers.go | 100
-rw-r--r--  weed/s3api/s3api_object_handlers.go | 163
-rw-r--r--  weed/s3api/s3api_objects_list_handlers.go | 179
-rw-r--r--  weed/s3api/s3api_server.go | 115
-rw-r--r--  weed/s3api/s3api_xsd_generated.go | 1002
-rw-r--r--  weed/server/filer_grpc_server.go | 79
-rw-r--r--  weed/server/filer_server.go | 47
-rw-r--r--  weed/server/filer_server_handlers_read.go | 10
-rw-r--r--  weed/server/filer_server_handlers_write.go | 95
-rw-r--r--  weed/server/filer_server_handlers_write_autochunk.go | 27
-rw-r--r--  weed/server/filer_server_handlers_write_monopart.go | 139
-rw-r--r--  weed/server/filer_server_handlers_write_multipart.go | 39
-rw-r--r--  weed/server/master_ui/templates.go | 2
-rw-r--r--  weed/server/raft_server_handlers.go | 2
-rw-r--r--  weed/server/volume_grpc_client.go | 2
-rw-r--r--  weed/server/volume_server.go | 2
-rw-r--r--  weed/server/volume_server_handlers.go | 10
-rw-r--r--  weed/server/volume_server_handlers_sync.go | 9
-rw-r--r--  weed/server/volume_server_handlers_write.go | 14
-rw-r--r--  weed/storage/file_id.go | 41
-rw-r--r--  weed/storage/needle.go | 139
-rw-r--r--  weed/storage/needle/btree_map.go | 7
-rw-r--r--  weed/storage/needle/compact_map.go | 27
-rw-r--r--  weed/storage/needle/compact_map_perf_test.go | 10
-rw-r--r--  weed/storage/needle/compact_map_test.go | 19
-rw-r--r--  weed/storage/needle/needle_value.go | 13
-rw-r--r--  weed/storage/needle/needle_value_map.go | 10
-rw-r--r--  weed/storage/needle_map.go | 74
-rw-r--r--  weed/storage/needle_map_boltdb.go | 59
-rw-r--r--  weed/storage/needle_map_leveldb.go | 45
-rw-r--r--  weed/storage/needle_map_memory.go | 34
-rw-r--r--  weed/storage/needle_map_metric.go | 106
-rw-r--r--  weed/storage/needle_map_metric_test.go | 30
-rw-r--r--  weed/storage/needle_parse_multipart.go | 100
-rw-r--r--  weed/storage/needle_read_write.go | 51
-rw-r--r--  weed/storage/needle_test.go | 13
-rw-r--r--  weed/storage/store.go | 5
-rw-r--r--  weed/storage/types/needle_id_type.go | 42
-rw-r--r--  weed/storage/types/needle_types.go | 52
-rw-r--r--  weed/storage/volume_checking.go | 15
-rw-r--r--  weed/storage/volume_loading.go | 2
-rw-r--r--  weed/storage/volume_read_write.go | 19
-rw-r--r--  weed/storage/volume_sync.go | 15
-rw-r--r--  weed/storage/volume_vacuum.go | 27
-rw-r--r--  weed/storage/volume_vacuum_test.go | 9
-rw-r--r--  weed/tools/read_index.go | 5
-rw-r--r--  weed/topology/configuration.go | 4
-rw-r--r--  weed/topology/topology.go | 8
-rw-r--r--  weed/topology/topology_test.go | 45
-rw-r--r--  weed/topology/volume_layout.go | 7
-rw-r--r--  weed/util/constants.go | 2
84 files changed, 4084 insertions(+), 950 deletions(-)
diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go
index d7a80472d..779580a9b 100644
--- a/unmaintained/change_superblock/change_superblock.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -87,7 +87,7 @@ func main() {
if hasChange {
- header = superBlock.Bytes()
+ header := superBlock.Bytes()
if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
glog.Fatalf("cannot write super block: %v", e)
diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go
index 8cc6a93d8..10dd94810 100644
--- a/unmaintained/fix_dat/fix_dat.go
+++ b/unmaintained/fix_dat/fix_dat.go
@@ -11,6 +11,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
var (
@@ -105,8 +106,8 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex)
- padding := storage.NeedlePaddingSize - ((sizeFromIndex + storage.NeedleHeaderSize + storage.NeedleChecksumSize) % storage.NeedlePaddingSize)
- rest = sizeFromIndex + storage.NeedleChecksumSize + padding
+ padding := types.NeedlePaddingSize - ((sizeFromIndex + types.NeedleEntrySize + storage.NeedleChecksumSize) % types.NeedlePaddingSize)
+ rest = int64(sizeFromIndex + storage.NeedleChecksumSize + padding)
func() {
defer func() {
@@ -114,7 +115,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
fmt.Println("Recovered in f", r)
}
}()
- if err = n.ReadNeedleBody(datFile, version, offset+int64(storage.NeedleHeaderSize), rest); err != nil {
+ if err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleEntrySize), rest); err != nil {
fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err)
}
}()
@@ -124,7 +125,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
}
visitNeedle(n, offset)
- offset += int64(storage.NeedleHeaderSize) + int64(rest)
+ offset += types.NeedleEntrySize + rest
//fmt.Printf("==> new entry offset %d\n", offset)
if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil {
if err == io.EOF {
diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go
index e7735b9a1..9e8133dde 100644
--- a/unmaintained/see_idx/see_idx.go
+++ b/unmaintained/see_idx/see_idx.go
@@ -8,6 +8,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
var (
@@ -33,6 +35,9 @@ func main() {
}
defer indexFile.Close()
- storage.LoadNeedleMap(indexFile)
+ storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
+ fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
+ return nil
+ })
}
diff --git a/weed/command/command.go b/weed/command/command.go
index c451936e5..c6b005dd9 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -16,6 +16,7 @@ var Commands = []*Command{
cmdServer,
cmdMaster,
cmdFiler,
+ cmdS3,
cmdUpload,
cmdDownload,
cmdShell,
diff --git a/weed/command/export.go b/weed/command/export.go
index 0f7496472..529ee47e3 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -14,6 +14,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "io"
)
const (
@@ -49,9 +51,11 @@ func init() {
}
var (
- output = cmdExport.Flag.String("o", "", "output tar file name, must ends with .tar, or just a \"-\" for stdout")
- format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Mime}} {{.Id}} {{.Name}} {{.Ext}}")
- newer = cmdExport.Flag.String("newer", "", "export only files newer than this time, default is all files. Must be specified in RFC3339 without timezone, e.g. 2006-01-02T15:04:05")
+ output = cmdExport.Flag.String("o", "", "output tar file name, must ends with .tar, or just a \"-\" for stdout")
+ format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Mime}} {{.Id}} {{.Name}} {{.Ext}}")
+ newer = cmdExport.Flag.String("newer", "", "export only files newer than this time, default is all files. Must be specified in RFC3339 without timezone, e.g. 2006-01-02T15:04:05")
+ showDeleted = cmdExport.Flag.Bool("deleted", false, "export deleted files. only applies if -o is not specified")
+ limit = cmdExport.Flag.Int("limit", 0, "only show first n entries if specified")
tarOutputFile *tar.Writer
tarHeader tar.Header
@@ -62,6 +66,24 @@ var (
localLocation, _ = time.LoadLocation("Local")
)
+func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Version, deleted bool) {
+ key := storage.NewFileIdFromNeedle(vid, n).String()
+ size := n.DataSize
+ if version == storage.Version1 {
+ size = n.Size
+ }
+ fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n",
+ key,
+ n.Name,
+ size,
+ n.IsGzipped(),
+ n.Mime,
+ n.LastModifiedString(),
+ n.Ttl.String(),
+ deleted,
+ )
+}
+
func runExport(cmd *Command, args []string) bool {
var err error
@@ -125,6 +147,12 @@ func runExport(cmd *Command, args []string) bool {
var version storage.Version
+ if tarOutputFile == nil {
+ fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\n")
+ }
+
+ var counter = 0
+
err = storage.ScanVolumeFile(*export.dir, *export.collection, vid,
storage.NeedleMapInMemory,
func(superBlock storage.SuperBlock) error {
@@ -134,22 +162,39 @@ func runExport(cmd *Command, args []string) bool {
nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(), n.IsGzipped(), ok, nv)
- if ok && nv.Size > 0 && int64(nv.Offset)*8 == offset {
+ if ok && nv.Size > 0 && int64(nv.Offset)*types.NeedlePaddingSize == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)
return nil
}
- return walker(vid, n, version)
+ counter++
+ if *limit > 0 && counter > *limit {
+ return io.EOF
+ }
+ if tarOutputFile != nil {
+ return writeFile(vid, n)
+ } else {
+ printNeedle(vid, n, version, false)
+ return nil
+ }
}
if !ok {
+ if *showDeleted && tarOutputFile == nil {
+ if n.DataSize > 0 {
+ printNeedle(vid, n, version, true)
+ } else {
+ n.Name = []byte("*tombstone")
+ printNeedle(vid, n, version, true)
+ }
+ }
glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
} else {
glog.V(2).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size)
}
return nil
})
- if err != nil {
+ if err != nil && err != io.EOF {
glog.Fatalf("Export Volume File [ERROR] %s\n", err)
}
return true
@@ -157,57 +202,43 @@ func runExport(cmd *Command, args []string) bool {
type nameParams struct {
Name string
- Id uint64
+ Id types.NeedleId
Mime string
Key string
Ext string
}
-func walker(vid storage.VolumeId, n *storage.Needle, version storage.Version) (err error) {
+func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) {
key := storage.NewFileIdFromNeedle(vid, n).String()
- if tarOutputFile != nil {
- fileNameTemplateBuffer.Reset()
- if err = fileNameTemplate.Execute(fileNameTemplateBuffer,
- nameParams{
- Name: string(n.Name),
- Id: n.Id,
- Mime: string(n.Mime),
- Key: key,
- Ext: filepath.Ext(string(n.Name)),
- },
- ); err != nil {
- return err
- }
+ fileNameTemplateBuffer.Reset()
+ if err = fileNameTemplate.Execute(fileNameTemplateBuffer,
+ nameParams{
+ Name: string(n.Name),
+ Id: n.Id,
+ Mime: string(n.Mime),
+ Key: key,
+ Ext: filepath.Ext(string(n.Name)),
+ },
+ ); err != nil {
+ return err
+ }
- fileName := fileNameTemplateBuffer.String()
+ fileName := fileNameTemplateBuffer.String()
- if n.IsGzipped() && path.Ext(fileName) != ".gz" {
- fileName = fileName + ".gz"
- }
+ if n.IsGzipped() && path.Ext(fileName) != ".gz" {
+ fileName = fileName + ".gz"
+ }
- tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data))
- if n.HasLastModifiedDate() {
- tarHeader.ModTime = time.Unix(int64(n.LastModified), 0)
- } else {
- tarHeader.ModTime = time.Unix(0, 0)
- }
- tarHeader.ChangeTime = tarHeader.ModTime
- if err = tarOutputFile.WriteHeader(&tarHeader); err != nil {
- return err
- }
- _, err = tarOutputFile.Write(n.Data)
+ tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data))
+ if n.HasLastModifiedDate() {
+ tarHeader.ModTime = time.Unix(int64(n.LastModified), 0)
} else {
- size := n.DataSize
- if version == storage.Version1 {
- size = n.Size
- }
- fmt.Printf("key=%s Name=%s Size=%d gzip=%t mime=%s\n",
- key,
- n.Name,
- size,
- n.IsGzipped(),
- n.Mime,
- )
+ tarHeader.ModTime = time.Unix(0, 0)
+ }
+ tarHeader.ChangeTime = tarHeader.ModTime
+ if err = tarOutputFile.WriteHeader(&tarHeader); err != nil {
+ return err
}
+ _, err = tarOutputFile.Write(n.Data)
return
}
diff --git a/weed/command/filer.go b/weed/command/filer.go
index e5a3e379a..8449d84ae 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -30,6 +30,8 @@ type FilerOptions struct {
disableDirListing *bool
maxMB *int
secretKey *string
+ dirListingLimit *int
+ dataCenter *string
}
func init() {
@@ -45,6 +47,8 @@ func init() {
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
+ f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 1000, "limit sub dir listing size")
+ f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
}
var cmdFiler = &Command{
@@ -86,12 +90,17 @@ func (fo *FilerOptions) start() {
masters := *f.masters
- fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux,
- *fo.ip, *fo.port, strings.Split(masters, ","), *fo.collection,
- *fo.defaultReplicaPlacement, *fo.redirectOnRead, *fo.disableDirListing,
- *fo.maxMB,
- *fo.secretKey,
- )
+ fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
+ Masters: strings.Split(masters, ","),
+ Collection: *f.collection,
+ DefaultReplication: *f.defaultReplicaPlacement,
+ RedirectOnRead: *f.redirectOnRead,
+ DisableDirListing: *f.disableDirListing,
+ MaxMB: *f.maxMB,
+ SecretKey: *f.secretKey,
+ DirListingLimit: *f.dirListingLimit,
+ DataCenter: *f.dataCenter,
+ })
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
}
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 9937bc9d6..218abf645 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -8,16 +8,16 @@ import (
"path/filepath"
"strings"
+ "context"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "path"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
"net/http"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "path"
"strconv"
- "io"
"time"
- "context"
- "github.com/chrislusf/seaweedfs/weed/util"
)
var (
@@ -77,7 +77,7 @@ func runCopy(cmd *Command, args []string) bool {
return false
}
filerDestination := args[len(args)-1]
- fileOrDirs := args[0: len(args)-1]
+ fileOrDirs := args[0 : len(args)-1]
filerUrl, err := url.Parse(filerDestination)
if err != nil {
diff --git a/weed/command/fix.go b/weed/command/fix.go
index f3103c6c2..32b09fd72 100644
--- a/weed/command/fix.go
+++ b/weed/command/fix.go
@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
func init() {
@@ -54,11 +55,11 @@ func runFix(cmd *Command, args []string) bool {
}, false, func(n *storage.Needle, offset int64) error {
glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(), n.IsGzipped())
if n.Size > 0 {
- pe := nm.Put(n.Id, uint32(offset/storage.NeedlePaddingSize), n.Size)
+ pe := nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe)
} else {
glog.V(2).Infof("skipping deleted file ...")
- return nm.Delete(n.Id, uint32(offset/storage.NeedlePaddingSize))
+ return nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize))
}
return nil
})
diff --git a/weed/command/mount.go b/weed/command/mount.go
index a4ee03ac5..613066951 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -1,13 +1,22 @@
package command
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
type MountOptions struct {
- filer *string
- filerGrpcPort *int
- dir *string
- collection *string
- replication *string
- ttlSec *int
- chunkSizeLimitMB *int
+ filer *string
+ filerGrpcPort *int
+ filerMountRootPath *string
+ dir *string
+ dirListingLimit *int
+ collection *string
+ replication *string
+ ttlSec *int
+ chunkSizeLimitMB *int
+ dataCenter *string
}
var (
@@ -18,11 +27,14 @@ func init() {
cmdMount.Run = runMount // break init cycle
mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
mountOptions.filerGrpcPort = cmdMount.Flag.Int("filer.grpc.port", 0, "filer grpc server listen port, default to http port + 10000")
+ mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
+ mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 1000, "limit sub dir listing size")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "000", "replication to create to files")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
+ mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
}
var cmdMount = &Command{
@@ -42,3 +54,22 @@ var cmdMount = &Command{
`,
}
+
+func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) {
+ hostnameAndPort := strings.Split(filer, ":")
+ if len(hostnameAndPort) != 2 {
+ return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort)
+ }
+
+ filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return "", fmt.Errorf("The filer filer port parse error: %v", parseErr)
+ }
+
+ filerGrpcPort := int(filerPort) + 10000
+ if optionalGrpcPort != 0 {
+ filerGrpcPort = optionalGrpcPort
+ }
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
+}
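
Note: the new parseFilerGrpcAddress helper defaults the filer gRPC port to the HTTP port plus 10000 unless an explicit port (e.g. -filer.grpc.port) overrides it. A minimal test-style sketch of that behavior, not part of this commit, assuming it sits alongside mount.go in package command:

    package command

    import "testing"

    func TestParseFilerGrpcAddressSketch(t *testing.T) {
        // With no explicit gRPC port, the helper assumes HTTP port + 10000.
        if addr, err := parseFilerGrpcAddress("localhost:8888", 0); err != nil || addr != "localhost:18888" {
            t.Fatalf("default port: got %q, %v", addr, err)
        }
        // An explicit port value overrides the +10000 default.
        if addr, err := parseFilerGrpcAddress("localhost:8888", 17777); err != nil || addr != "localhost:17777" {
            t.Fatalf("explicit port: got %q, %v", addr, err)
        }
    }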
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index f2ec349bc..92dcc9008 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -12,7 +12,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"strings"
- "strconv"
)
func runMount(cmd *Command, args []string) bool {
@@ -53,28 +52,27 @@ func runMount(cmd *Command, args []string) bool {
c.Close()
})
- hostnameAndPort := strings.Split(*mountOptions.filer, ":")
- if len(hostnameAndPort) != 2 {
- fmt.Printf("The filer should have hostname:port format: %v\n", hostnameAndPort)
- return false
- }
-
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- fmt.Printf("The filer filer port parse error: %v\n", parseErr)
+ filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort)
+ if err != nil {
+ glog.Fatal(err)
return false
}
- filerGrpcPort := filerPort + 10000
- if *mountOptions.filerGrpcPort != 0 {
- filerGrpcPort = uint64(*copy.filerGrpcPort)
+ mountRoot := *mountOptions.filerMountRootPath
+ if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") {
+ mountRoot = mountRoot[0 : len(mountRoot)-1]
}
- filerAddress := fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort)
-
- err = fs.Serve(c, filesys.NewSeaweedFileSystem(
- filerAddress, *mountOptions.collection, *mountOptions.replication, int32(*mountOptions.ttlSec),
- *mountOptions.chunkSizeLimitMB))
+ err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
+ FilerGrpcAddress: filerGrpcAddress,
+ FilerMountRootPath: mountRoot,
+ Collection: *mountOptions.collection,
+ Replication: *mountOptions.replication,
+ TtlSec: int32(*mountOptions.ttlSec),
+ ChunkSizeLimit: int64(*mountOptions.chunkSizeLimitMB) * 1024 * 1024,
+ DataCenter: *mountOptions.dataCenter,
+ DirListingLimit: *mountOptions.dirListingLimit,
+ }))
if err != nil {
fuse.Unmount(*mountOptions.dir)
}
diff --git a/weed/command/s3.go b/weed/command/s3.go
new file mode 100644
index 000000000..2d58d93a9
--- /dev/null
+++ b/weed/command/s3.go
@@ -0,0 +1,76 @@
+package command
+
+import (
+ "net/http"
+ "time"
+
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/s3api"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/gorilla/mux"
+)
+
+var (
+ s3options S3Options
+)
+
+type S3Options struct {
+ filer *string
+ filerGrpcPort *int
+ filerBucketsPath *string
+ port *int
+ domainName *string
+}
+
+func init() {
+ cmdS3.Run = runS3 // break init cycle
+ s3options.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
+ s3options.filerGrpcPort = cmdS3.Flag.Int("filer.grpcPort", 0, "filer server grpc port, default to filer http port plus 10000")
+ s3options.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
+ s3options.port = cmdS3.Flag.Int("port", 8333, "s3options server http listen port")
+ s3options.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
+}
+
+var cmdS3 = &Command{
+ UsageLine: "s3 -port=8333 -filer=<ip:port>",
+ Short: "start a s3 API compatible server that is backed by a filer",
+ Long: `start a s3 API compatible server that is backed by a filer.
+
+`,
+}
+
+func runS3(cmd *Command, args []string) bool {
+
+ filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ router := mux.NewRouter().SkipClean(true)
+
+ _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
+ Filer: *s3options.filer,
+ FilerGrpcAddress: filerGrpcAddress,
+ DomainName: *s3options.domainName,
+ BucketsPath: *s3options.filerBucketsPath,
+ })
+ if s3ApiServer_err != nil {
+ glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
+ }
+
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at port %d", util.VERSION, *s3options.port)
+ s3ApiListener, e := util.NewListener(fmt.Sprintf(":%d", *s3options.port), time.Duration(10)*time.Second)
+ if e != nil {
+ glog.Fatalf("S3 API Server listener error: %v", e)
+ }
+
+ httpS := &http.Server{Handler: router}
+ if err := httpS.Serve(s3ApiListener); err != nil {
+ glog.Fatalf("S3 API Server Fail to serve: %v", e)
+ }
+
+ return true
+
+}
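
Note: each S3 bucket maps to a folder under -filer.dir.buckets (default /buckets) on the filer, and objects become files inside it. A rough client sketch, not part of this commit, assuming the bucket and object PUT routes that s3api_server.go registers:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        // Creating bucket "journal" would make the filer folder /buckets/journal...
        req, _ := http.NewRequest("PUT", "http://localhost:8333/journal", nil)
        if resp, err := http.DefaultClient.Do(req); err == nil {
            fmt.Println("create bucket:", resp.Status)
            resp.Body.Close()
        }
        // ...and each object a file inside that folder.
        put, _ := http.NewRequest("PUT", "http://localhost:8333/journal/notes.txt",
            strings.NewReader("hello"))
        if resp, err := http.DefaultClient.Do(put); err == nil {
            fmt.Println("put object:", resp.Status)
            resp.Body.Close()
        }
    }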
diff --git a/weed/command/server.go b/weed/command/server.go
index 485dea7ac..983b3075f 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -89,6 +89,7 @@ func init() {
filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
+ filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
}
func runServer(cmd *Command, args []string) bool {
@@ -109,6 +110,8 @@ func runServer(cmd *Command, args []string) bool {
master := *serverIp + ":" + strconv.Itoa(*masterPort)
filerOptions.ip = serverIp
+ filerOptions.dataCenter = serverDataCenter
+
if *filerOptions.defaultReplicaPlacement == "" {
*filerOptions.defaultReplicaPlacement = *masterDefaultReplicaPlacement
}
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go
index 82ef571b6..5f2990475 100644
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ b/weed/filer2/abstract_sql/abstract_sql_store.go
@@ -64,7 +64,7 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
- return nil, fmt.Errorf("read entry %s: %v", fullpath, err)
+ return nil, filer2.ErrNotFound
}
entry := &filer2.Entry{
@@ -77,7 +77,7 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
return entry, nil
}
-func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) (error) {
+func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
dir, name := fullpath.DirAndName()
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go
index d731cd99c..7552cb524 100644
--- a/weed/filer2/cassandra/cassandra_store.go
+++ b/weed/filer2/cassandra/cassandra_store.go
@@ -68,7 +68,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
- return nil, fmt.Errorf("read entry %s: %v", fullpath, err)
+ return nil, filer2.ErrNotFound
}
}
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go
index 53de5bea3..2deb8ffd5 100644
--- a/weed/filer2/filer.go
+++ b/weed/filer2/filer.go
@@ -4,12 +4,13 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/karlseguin/ccache"
"os"
"path/filepath"
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/operation"
)
type Filer struct {
@@ -80,6 +81,8 @@ func (f *Filer) CreateEntry(entry *Entry) error {
if mkdirErr != nil {
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
+ } else if !dirEntry.IsDirectory() {
+ return fmt.Errorf("%s is a file", dirPath)
}
// cache the directory entry
@@ -110,7 +113,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
- f.deleteChunks(oldEntry)
+ f.deleteChunksIfNotNew(oldEntry, entry)
return nil
}
@@ -123,7 +126,7 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
return f.store.FindEntry(p)
}
-func (f *Filer) DeleteEntryMetaAndData(p FullPath, shouldDeleteChunks bool) (err error) {
+func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) {
entry, err := f.FindEntry(p)
if err != nil {
return err
@@ -134,13 +137,20 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, shouldDeleteChunks bool) (err
if err != nil {
return fmt.Errorf("list folder %s: %v", p, err)
}
- if len(entries) > 0 {
- return fmt.Errorf("folder %s is not empty", p)
+ if isRecursive {
+ for _, sub := range entries {
+ f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks)
+ }
+ } else {
+ if len(entries) > 0 {
+ return fmt.Errorf("folder %s is not empty", p)
+ }
}
+ f.cacheDelDirectory(string(p))
}
if shouldDeleteChunks {
- f.deleteChunks(entry)
+ f.deleteChunks(entry.Chunks)
}
return f.store.DeleteEntry(p)
@@ -148,11 +158,19 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, shouldDeleteChunks bool) (err
func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 {
- p = p[0: len(p)-1]
+ p = p[0 : len(p)-1]
}
return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit)
}
+func (f *Filer) cacheDelDirectory(dirpath string) {
+ if f.directoryCache == nil {
+ return
+ }
+ f.directoryCache.Delete(dirpath)
+ return
+}
+
func (f *Filer) cacheGetDirectory(dirpath string) *Entry {
if f.directoryCache == nil {
return nil
@@ -178,14 +196,36 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
-func (f *Filer) deleteChunks(entry *Entry) {
+func (f *Filer) deleteChunks(chunks []*filer_pb.FileChunk) {
+ for _, chunk := range chunks {
+ if err := operation.DeleteFile(f.GetMaster(), chunk.FileId, ""); err != nil {
+ glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
+ }
+ }
+}
- if entry == nil {
+func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
+
+ if oldEntry == nil {
return
}
- for _, chunk := range entry.Chunks {
- if err := operation.DeleteFile(f.GetMaster(), chunk.FileId, ""); err != nil {
- glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
+ if newEntry == nil {
+ f.deleteChunks(oldEntry.Chunks)
+ }
+
+ var toDelete []*filer_pb.FileChunk
+
+ for _, oldChunk := range oldEntry.Chunks {
+ found := false
+ for _, newChunk := range newEntry.Chunks {
+ if oldChunk.FileId == newChunk.FileId {
+ found = true
+ break
+ }
+ }
+ if !found {
+ toDelete = append(toDelete, oldChunk)
}
}
+ f.deleteChunks(toDelete)
}
diff --git a/weed/filer2/filer_master.go b/weed/filer2/filer_master.go
index 51b12c237..63c3ef452 100644
--- a/weed/filer2/filer_master.go
+++ b/weed/filer2/filer_master.go
@@ -1,12 +1,12 @@
package filer2
import (
- "fmt"
"context"
+ "fmt"
"time"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go
index b169c6f80..68fc06a5d 100644
--- a/weed/filer2/filerstore.go
+++ b/weed/filer2/filerstore.go
@@ -11,6 +11,7 @@ type FilerStore interface {
Initialize(configuration Configuration) error
InsertEntry(*Entry) error
UpdateEntry(*Entry) (err error)
+ // err == filer2.ErrNotFound if not found
FindEntry(FullPath) (entry *Entry, err error)
DeleteEntry(FullPath) (err error)
ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
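
Note: with the ErrNotFound contract documented above, callers can tell a missing entry apart from a backend failure. A small sketch, not part of this commit, assuming the filer2.ErrNotFound value that the store changes in this diff reference:

    package filer2

    // exists treats ErrNotFound as a clean miss and anything else as a
    // real store error. Sketch only.
    func exists(store FilerStore, p FullPath) (bool, error) {
        _, err := store.FindEntry(p)
        if err == ErrNotFound {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        return true, nil
    }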
diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go
index 48a9bea31..08cee0afd 100644
--- a/weed/filer2/memdb/memdb_store.go
+++ b/weed/filer2/memdb/memdb_store.go
@@ -49,7 +49,7 @@ func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) {
func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}})
if item == nil {
- return nil, nil
+ return nil, filer2.ErrNotFound
}
entry = item.(entryItem).Entry
return entry, nil
diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go
index 84ee06ce1..cf813e04b 100644
--- a/weed/filer2/memdb/memdb_store_test.go
+++ b/weed/filer2/memdb/memdb_store_test.go
@@ -134,7 +134,7 @@ func TestCreateFileAndList(t *testing.T) {
}
// delete file and count
- filer.DeleteEntryMetaAndData(file3Path, false)
+ filer.DeleteEntryMetaAndData(file3Path, false, false)
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index d2de215fc..06c5f2569 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -4,17 +4,20 @@ import (
"context"
"os"
"path"
+ "path/filepath"
+ "time"
+
"bazil.org/fuse"
"bazil.org/fuse/fs"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "path/filepath"
- "time"
)
type Dir struct {
- Path string
- wfs *WFS
+ Path string
+ wfs *WFS
+ attributes *filer_pb.FuseAttributes
}
var _ = fs.Node(&Dir{})
@@ -24,6 +27,7 @@ var _ = fs.NodeRequestLookuper(&Dir{})
var _ = fs.HandleReadDirAller(&Dir{})
var _ = fs.NodeRemover(&Dir{})
var _ = fs.NodeRenamer(&Dir{})
+var _ = fs.NodeSetattrer(&Dir{})
func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
@@ -48,8 +52,6 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
parent, name := filepath.Split(dir.Path)
- var attributes *filer_pb.FuseAttributes
-
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.GetEntryAttributesRequest{
@@ -64,7 +66,7 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
return err
}
- attributes = resp.Attributes
+ dir.attributes = resp.Attributes
return nil
})
@@ -76,26 +78,26 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
// glog.V(1).Infof("dir %s: %v", dir.Path, attributes)
// glog.V(1).Infof("dir %s permission: %v", dir.Path, os.FileMode(attributes.FileMode))
- attr.Mode = os.FileMode(attributes.FileMode) | os.ModeDir
- if dir.Path == "/" && attributes.FileMode == 0 {
+ attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir
+ if dir.Path == "/" && dir.attributes.FileMode == 0 {
attr.Valid = time.Second
}
- attr.Mtime = time.Unix(attributes.Mtime, 0)
- attr.Ctime = time.Unix(attributes.Crtime, 0)
- attr.Gid = attributes.Gid
- attr.Uid = attributes.Uid
+ attr.Mtime = time.Unix(dir.attributes.Mtime, 0)
+ attr.Ctime = time.Unix(dir.attributes.Crtime, 0)
+ attr.Gid = dir.attributes.Gid
+ attr.Uid = dir.attributes.Uid
return nil
}
-func (dir *Dir) newFile(name string, chunks []*filer_pb.FileChunk) *File {
+func (dir *Dir) newFile(name string, chunks []*filer_pb.FileChunk, attr *filer_pb.FuseAttributes) *File {
return &File{
- Name: name,
- dir: dir,
- wfs: dir.wfs,
- // attributes: &filer_pb.FuseAttributes{},
- Chunks: chunks,
+ Name: name,
+ dir: dir,
+ wfs: dir.wfs,
+ attributes: attr,
+ Chunks: chunks,
}
}
@@ -115,9 +117,9 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
FileMode: uint32(req.Mode),
Uid: req.Uid,
Gid: req.Gid,
- Collection: dir.wfs.collection,
- Replication: dir.wfs.replication,
- TtlSec: dir.wfs.ttlSec,
+ Collection: dir.wfs.option.Collection,
+ Replication: dir.wfs.option.Replication,
+ TtlSec: dir.wfs.option.TtlSec,
},
},
}
@@ -132,7 +134,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
})
if err == nil {
- file := dir.newFile(req.Name, nil)
+ file := dir.newFile(req.Name, nil, &filer_pb.FuseAttributes{})
file.isOpen = true
return file, dir.wfs.AcquireHandle(file, req.Uid, req.Gid), nil
}
@@ -200,9 +202,9 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
if entry != nil {
if entry.IsDirectory {
- node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
+ node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, attributes: entry.Attributes}
} else {
- node = dir.newFile(req.Name, entry.Chunks)
+ node = dir.newFile(req.Name, entry.Chunks, entry.Attributes)
}
resp.EntryValid = time.Duration(0)
@@ -224,6 +226,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
request := &filer_pb.ListEntriesRequest{
Directory: dir.Path,
+ Limit: uint32(dir.wfs.option.DirListingLimit),
}
glog.V(4).Infof("read directory: %v", request)
@@ -272,3 +275,45 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
})
}
+
+func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
+
+ glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle)
+ if req.Valid.Mode() {
+ dir.attributes.FileMode = uint32(req.Mode)
+ }
+
+ if req.Valid.Uid() {
+ dir.attributes.Uid = req.Uid
+ }
+
+ if req.Valid.Gid() {
+ dir.attributes.Gid = req.Gid
+ }
+
+ if req.Valid.Mtime() {
+ dir.attributes.Mtime = req.Mtime.Unix()
+ }
+
+ parentDir, name := filer2.FullPath(dir.Path).DirAndName()
+ return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.UpdateEntryRequest{
+ Directory: parentDir,
+ Entry: &filer_pb.Entry{
+ Name: name,
+ Attributes: dir.attributes,
+ },
+ }
+
+ glog.V(1).Infof("set attr directory entry: %v", request)
+ _, err := client.UpdateEntry(ctx, request)
+ if err != nil {
+ glog.V(0).Infof("UpdateEntry %s: %v", dir.Path, err)
+ return fuse.EIO
+ }
+
+ return nil
+ })
+
+}
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index a89c51b31..7ba515202 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -1,9 +1,9 @@
package filesys
import (
- "context"
"bazil.org/fuse"
"bazil.org/fuse/fs"
+ "context"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"path/filepath"
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 172262c98..877932610 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -1,14 +1,14 @@
package filesys
import (
- "fmt"
"bytes"
- "time"
"context"
+ "fmt"
+ "time"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type ContinuousDirtyPages struct {
@@ -21,7 +21,7 @@ type ContinuousDirtyPages struct {
func newDirtyPages(file *File) *ContinuousDirtyPages {
return &ContinuousDirtyPages{
- Data: make([]byte, file.wfs.chunkSizeLimit),
+ Data: make([]byte, file.wfs.option.ChunkSizeLimit),
f: file,
}
}
@@ -119,9 +119,10 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
request := &filer_pb.AssignVolumeRequest{
Count: 1,
- Replication: pages.f.wfs.replication,
- Collection: pages.f.wfs.collection,
- TtlSec: pages.f.wfs.ttlSec,
+ Replication: pages.f.wfs.option.Replication,
+ Collection: pages.f.wfs.option.Collection,
+ TtlSec: pages.f.wfs.option.TtlSec,
+ DataCenter: pages.f.wfs.option.DataCenter,
}
resp, err := client.AssignVolume(ctx, request)
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 95f478a76..3f22f0571 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -34,7 +34,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
if file.attributes == nil || !file.isOpen {
item := file.wfs.listDirectoryEntriesCache.Get(file.fullpath())
- if item != nil && !item.Expired(){
+ if item != nil && !item.Expired() {
entry := item.Value().(*filer_pb.Entry)
file.Chunks = entry.Chunks
file.attributes = entry.Attributes
@@ -121,7 +121,26 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.attributes.Mtime = req.Mtime.Unix()
}
- return nil
+ return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.UpdateEntryRequest{
+ Directory: file.dir.Path,
+ Entry: &filer_pb.Entry{
+ Name: file.Name,
+ Attributes: file.attributes,
+ Chunks: file.Chunks,
+ },
+ }
+
+ glog.V(1).Infof("set attr file entry: %v", request)
+ _, err := client.UpdateEntry(ctx, request)
+ if err != nil {
+ glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err)
+ return fuse.EIO
+ }
+
+ return nil
+ })
}
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 74125dc09..0c13db984 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -9,15 +9,16 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "net/http"
"strings"
"sync"
- "net/http"
)
type FileHandle struct {
// cache file has been written to
dirtyPages *ContinuousDirtyPages
dirtyMetadata bool
+ contentType string
handle uint64
@@ -145,7 +146,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
resp.Size = len(req.Data)
if req.Offset == 0 {
- fh.f.attributes.Mime = http.DetectContentType(req.Data)
+ fh.contentType = http.DetectContentType(req.Data)
fh.dirtyMetadata = true
}
@@ -197,6 +198,10 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
err = fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if fh.f.attributes != nil {
+ fh.f.attributes.Mime = fh.contentType
+ }
+
request := &filer_pb.UpdateEntryRequest{
Directory: fh.f.dir.Path,
Entry: &filer_pb.Entry{
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index d7e133483..9b1b98ebf 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -1,23 +1,30 @@
package filesys
import (
+ "bazil.org/fuse"
"bazil.org/fuse/fs"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/karlseguin/ccache"
"sync"
- "bazil.org/fuse"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
)
+type Option struct {
+ FilerGrpcAddress string
+ FilerMountRootPath string
+ Collection string
+ Replication string
+ TtlSec int32
+ ChunkSizeLimit int64
+ DataCenter string
+ DirListingLimit int
+}
+
type WFS struct {
- filerGrpcAddress string
+ option *Option
listDirectoryEntriesCache *ccache.Cache
- collection string
- replication string
- ttlSec int32
- chunkSizeLimit int64
// contains all open handles
handles []*FileHandle
@@ -25,27 +32,23 @@ type WFS struct {
pathToHandleLock sync.Mutex
}
-func NewSeaweedFileSystem(filerGrpcAddress string, collection string, replication string, ttlSec int32, chunkSizeLimitMB int) *WFS {
+func NewSeaweedFileSystem(option *Option) *WFS {
return &WFS{
- filerGrpcAddress: filerGrpcAddress,
+ option: option,
listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(6000).ItemsToPrune(100)),
- collection: collection,
- replication: replication,
- ttlSec: ttlSec,
- chunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
pathToHandleIndex: make(map[string]int),
}
}
func (wfs *WFS) Root() (fs.Node, error) {
- return &Dir{Path: "/", wfs: wfs}, nil
+ return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil
}
func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
- grpcConnection, err := util.GrpcDial(wfs.filerGrpcAddress)
+ grpcConnection, err := util.GrpcDial(wfs.option.FilerGrpcAddress)
if err != nil {
- return fmt.Errorf("fail to dial %s: %v", wfs.filerGrpcAddress, err)
+ return fmt.Errorf("fail to dial %s: %v", wfs.option.FilerGrpcAddress, err)
}
defer grpcConnection.Close()
@@ -54,7 +57,7 @@ func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) erro
return fn(client)
}
-func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (handle *FileHandle) {
+func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
wfs.pathToHandleLock.Lock()
defer wfs.pathToHandleLock.Unlock()
@@ -62,39 +65,33 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (handle *FileHandle)
index, found := wfs.pathToHandleIndex[fullpath]
if found && wfs.handles[index] != nil {
- glog.V(4).Infoln(fullpath, "found handle id", index)
+ glog.V(4).Infoln(fullpath, "found fileHandle id", index)
return wfs.handles[index]
}
- // create a new handler
- handle = &FileHandle{
- f: file,
- dirtyPages: newDirtyPages(file),
- Uid: uid,
- Gid: gid,
- }
+ fileHandle = newFileHandle(file, uid, gid)
if found && wfs.handles[index] != nil {
- glog.V(4).Infoln(fullpath, "reuse previous handle id", index)
- wfs.handles[index] = handle
- handle.handle = uint64(index)
+ glog.V(4).Infoln(fullpath, "reuse previous fileHandle id", index)
+ wfs.handles[index] = fileHandle
+ fileHandle.handle = uint64(index)
return
}
for i, h := range wfs.handles {
if h == nil {
- wfs.handles[i] = handle
- handle.handle = uint64(i)
+ wfs.handles[i] = fileHandle
+ fileHandle.handle = uint64(i)
wfs.pathToHandleIndex[fullpath] = i
- glog.V(4).Infoln(fullpath, "reuse handle id", handle.handle)
+ glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle)
return
}
}
- wfs.handles = append(wfs.handles, handle)
- handle.handle = uint64(len(wfs.handles) - 1)
- glog.V(4).Infoln(fullpath, "new handle id", handle.handle)
- wfs.pathToHandleIndex[fullpath] = int(handle.handle)
+ wfs.handles = append(wfs.handles, fileHandle)
+ fileHandle.handle = uint64(len(wfs.handles) - 1)
+ glog.V(4).Infoln(fullpath, "new fileHandle id", fileHandle.handle)
+ wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
return
}
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index a3466bdd2..169fd664d 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -2,7 +2,6 @@ package operation
import (
"encoding/json"
- "errors"
"fmt"
"net/url"
"strconv"
@@ -29,40 +28,53 @@ type AssignResult struct {
Error string `json:"error,omitempty"`
}
-func Assign(server string, r *VolumeAssignRequest) (*AssignResult, error) {
- values := make(url.Values)
- values.Add("count", strconv.FormatUint(r.Count, 10))
- if r.Replication != "" {
- values.Add("replication", r.Replication)
- }
- if r.Collection != "" {
- values.Add("collection", r.Collection)
- }
- if r.Ttl != "" {
- values.Add("ttl", r.Ttl)
- }
- if r.DataCenter != "" {
- values.Add("dataCenter", r.DataCenter)
- }
- if r.Rack != "" {
- values.Add("rack", r.Rack)
- }
- if r.DataNode != "" {
- values.Add("dataNode", r.DataNode)
- }
+func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
+ var requests []*VolumeAssignRequest
+ requests = append(requests, primaryRequest)
+ requests = append(requests, alternativeRequests...)
- jsonBlob, err := util.Post("http://"+server+"/dir/assign", values)
- glog.V(2).Infof("assign result from %s : %s", server, string(jsonBlob))
- if err != nil {
- return nil, err
- }
- var ret AssignResult
- err = json.Unmarshal(jsonBlob, &ret)
- if err != nil {
- return nil, fmt.Errorf("/dir/assign result JSON unmarshal error:%v, json:%s", err, string(jsonBlob))
- }
- if ret.Count <= 0 {
- return nil, errors.New(ret.Error)
+ var lastError error
+ for i, request := range requests {
+ if request == nil {
+ continue
+ }
+ values := make(url.Values)
+ values.Add("count", strconv.FormatUint(request.Count, 10))
+ if request.Replication != "" {
+ values.Add("replication", request.Replication)
+ }
+ if request.Collection != "" {
+ values.Add("collection", request.Collection)
+ }
+ if request.Ttl != "" {
+ values.Add("ttl", request.Ttl)
+ }
+ if request.DataCenter != "" {
+ values.Add("dataCenter", request.DataCenter)
+ }
+ if request.Rack != "" {
+ values.Add("rack", request.Rack)
+ }
+ if request.DataNode != "" {
+ values.Add("dataNode", request.DataNode)
+ }
+
+ postUrl := fmt.Sprintf("http://%s/dir/assign", server)
+ jsonBlob, err := util.Post(postUrl, values)
+ glog.V(2).Infof("assign %d result from %s %+v : %s", i, postUrl, values, string(jsonBlob))
+ if err != nil {
+ return nil, err
+ }
+ var ret AssignResult
+ err = json.Unmarshal(jsonBlob, &ret)
+ if err != nil {
+ return nil, fmt.Errorf("/dir/assign result JSON unmarshal error:%v, json:%s", err, string(jsonBlob))
+ }
+ if ret.Count <= 0 {
+ lastError = fmt.Errorf("assign failure %d: %v", i+1, ret.Error)
+ continue
+ }
+ return &ret, nil
}
- return &ret, nil
+ return nil, lastError
}
diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go
index b7a727fc7..d4434b318 100644
--- a/weed/operation/sync_volume.go
+++ b/weed/operation/sync_volume.go
@@ -6,6 +6,7 @@ import (
"net/url"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -37,14 +38,14 @@ func GetVolumeSyncStatus(server string, vid string) (*SyncVolumeResponse, error)
return &ret, nil
}
-func GetVolumeIdxEntries(server string, vid string, eachEntryFn func(key uint64, offset, size uint32)) error {
+func GetVolumeIdxEntries(server string, vid string, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error {
values := make(url.Values)
values.Add("volume", vid)
- line := make([]byte, 16)
+ line := make([]byte, NeedleEntrySize)
err := util.GetBufferStream("http://"+server+"/admin/sync/index", values, line, func(bytes []byte) {
- key := util.BytesToUint64(bytes[:8])
- offset := util.BytesToUint32(bytes[8:12])
- size := util.BytesToUint32(bytes[12:16])
+ key := BytesToNeedleId(line[:NeedleIdSize])
+ offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize])
+ size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
eachEntryFn(key, offset, size)
})
if err != nil {
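
Note: each .idx entry is NeedleEntrySize bytes (NeedleIdSize + OffsetSize + SizeSize, i.e. 8 + 4 + 4 with the default types), stored big-endian, with the offset counted in NeedlePaddingSize units of 8 bytes, as the old bytes[:8]/[8:12]/[12:16] slicing shows. A standalone decoding sketch, not part of this commit:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // One 16-byte index entry: key | offset | size.
        entry := make([]byte, 16)
        binary.BigEndian.PutUint64(entry[0:8], 0x1234) // needle id (key)
        binary.BigEndian.PutUint32(entry[8:12], 42)    // offset, in 8-byte units
        binary.BigEndian.PutUint32(entry[12:16], 2048) // size, in bytes

        key := binary.BigEndian.Uint64(entry[0:8])
        offset := binary.BigEndian.Uint32(entry[8:12])
        size := binary.BigEndian.Uint32(entry[12:16])

        // The on-disk byte offset is offset * NeedlePaddingSize (8).
        fmt.Printf("key:%d fileOffset:%d size:%d\n", key, int64(offset)*8, size)
    }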
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index 684782cfe..1197d836e 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -30,6 +30,9 @@ service SeaweedFiler {
rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
}
+ rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -45,6 +48,10 @@ message LookupDirectoryEntryResponse {
message ListEntriesRequest {
string directory = 1;
+ string prefix = 2;
+ string startFromFileName = 3;
+ bool inclusiveStartFrom = 4;
+ uint32 limit = 5;
}
message ListEntriesResponse {
@@ -117,6 +124,7 @@ message DeleteEntryRequest {
string name = 2;
bool is_directory = 3;
bool is_delete_data = 4;
+ bool is_recursive = 5;
}
message DeleteEntryResponse {
@@ -127,6 +135,7 @@ message AssignVolumeRequest {
string collection = 2;
string replication = 3;
int32 ttl_sec = 4;
+ string data_center = 5;
}
message AssignVolumeResponse {
@@ -151,3 +160,10 @@ message Location {
message LookupVolumeResponse {
map<string, Locations> locations_map = 1;
}
+
+message DeleteCollectionRequest {
+ string collection = 1;
+}
+
+message DeleteCollectionResponse {
+}
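
Note: the new ListEntriesRequest fields make directory listing pageable: pass the previous page's last file name as startFromFileName with inclusiveStartFrom=false. A client sketch, not part of this commit, assuming the unary ListEntries RPC generated from this proto:

    package main

    import (
        "context"
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
        "google.golang.org/grpc"
    )

    func main() {
        conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
        if err != nil {
            fmt.Println("dial:", err)
            return
        }
        defer conn.Close()
        client := filer_pb.NewSeaweedFilerClient(conn)

        // Page through a directory 1000 entries at a time.
        startFrom, inclusive := "", true
        for {
            resp, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
                Directory:          "/home/chris",
                StartFromFileName:  startFrom,
                InclusiveStartFrom: inclusive,
                Limit:              1000,
            })
            if err != nil {
                fmt.Println("list:", err)
                return
            }
            if len(resp.Entries) == 0 {
                return
            }
            for _, entry := range resp.Entries {
                fmt.Println(entry.Name)
            }
            // Resume after the last entry of this page.
            startFrom, inclusive = resp.Entries[len(resp.Entries)-1].Name, false
        }
    }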
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 2675ab6a0..d70917709 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -32,6 +32,8 @@ It has these top-level messages:
Locations
Location
LookupVolumeResponse
+ DeleteCollectionRequest
+ DeleteCollectionResponse
*/
package filer_pb
@@ -96,7 +98,11 @@ func (m *LookupDirectoryEntryResponse) GetEntry() *Entry {
}
type ListEntriesRequest struct {
- Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+ Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+ Prefix string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"`
+ StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName" json:"startFromFileName,omitempty"`
+ InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom" json:"inclusiveStartFrom,omitempty"`
+ Limit uint32 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"`
}
func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} }
@@ -111,6 +117,34 @@ func (m *ListEntriesRequest) GetDirectory() string {
return ""
}
+func (m *ListEntriesRequest) GetPrefix() string {
+ if m != nil {
+ return m.Prefix
+ }
+ return ""
+}
+
+func (m *ListEntriesRequest) GetStartFromFileName() string {
+ if m != nil {
+ return m.StartFromFileName
+ }
+ return ""
+}
+
+func (m *ListEntriesRequest) GetInclusiveStartFrom() bool {
+ if m != nil {
+ return m.InclusiveStartFrom
+ }
+ return false
+}
+
+func (m *ListEntriesRequest) GetLimit() uint32 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
type ListEntriesResponse struct {
Entries []*Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
}
@@ -452,6 +486,7 @@ type DeleteEntryRequest struct {
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
IsDirectory bool `protobuf:"varint,3,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"`
+ IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"`
}
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
@@ -487,6 +522,13 @@ func (m *DeleteEntryRequest) GetIsDeleteData() bool {
return false
}
+func (m *DeleteEntryRequest) GetIsRecursive() bool {
+ if m != nil {
+ return m.IsRecursive
+ }
+ return false
+}
+
type DeleteEntryResponse struct {
}
@@ -500,6 +542,7 @@ type AssignVolumeRequest struct {
Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"`
+ DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
}
func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
@@ -535,6 +578,13 @@ func (m *AssignVolumeRequest) GetTtlSec() int32 {
return 0
}
+func (m *AssignVolumeRequest) GetDataCenter() string {
+ if m != nil {
+ return m.DataCenter
+ }
+ return ""
+}
+
type AssignVolumeResponse struct {
FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
@@ -647,6 +697,30 @@ func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
return nil
}
+type DeleteCollectionRequest struct {
+ Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
+}
+
+func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
+func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteCollectionRequest) ProtoMessage() {}
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *DeleteCollectionRequest) GetCollection() string {
+ if m != nil {
+ return m.Collection
+ }
+ return ""
+}
+
+type DeleteCollectionResponse struct {
+}
+
+func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
+func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteCollectionResponse) ProtoMessage() {}
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
func init() {
proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse")
@@ -671,6 +745,8 @@ func init() {
proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
proto.RegisterType((*Location)(nil), "filer_pb.Location")
proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
+ proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest")
+ proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -692,6 +768,7 @@ type SeaweedFilerClient interface {
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
+ DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
}
type seaweedFilerClient struct {
@@ -774,6 +851,15 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR
return out, nil
}
+func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
+ out := new(DeleteCollectionResponse)
+ err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for SeaweedFiler service
type SeaweedFilerServer interface {
@@ -785,6 +871,7 @@ type SeaweedFilerServer interface {
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
+ DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -935,6 +1022,24 @@ func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteCollectionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).DeleteCollection(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/DeleteCollection",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -971,6 +1076,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "LookupVolume",
Handler: _SeaweedFiler_LookupVolume_Handler,
},
+ {
+ MethodName: "DeleteCollection",
+ Handler: _SeaweedFiler_DeleteCollection_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "filer.proto",
@@ -979,66 +1088,75 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 971 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0xdc, 0x44,
- 0x14, 0x8e, 0xd7, 0xeb, 0xcd, 0xfa, 0xec, 0xa6, 0xc0, 0x6c, 0x5a, 0xcc, 0x36, 0xa9, 0x16, 0xd3,
- 0xa2, 0x54, 0x48, 0x51, 0x14, 0xb8, 0xa8, 0x40, 0x48, 0x54, 0x4d, 0xa9, 0x2a, 0xa5, 0xaa, 0xe4,
- 0x10, 0x24, 0xae, 0x56, 0x8e, 0x7d, 0x76, 0x19, 0xc5, 0x6b, 0x1b, 0xcf, 0x38, 0x28, 0xdc, 0x22,
- 0x71, 0xc3, 0x05, 0x4f, 0xc1, 0x1b, 0xf0, 0x06, 0xbc, 0x18, 0x9a, 0x1f, 0x7b, 0xc7, 0x6b, 0x6f,
- 0x7f, 0x2e, 0xb8, 0x9b, 0x39, 0x3f, 0xdf, 0xf9, 0x8e, 0x7d, 0xce, 0x67, 0xc3, 0x68, 0x41, 0x13,
- 0x2c, 0x8e, 0xf3, 0x22, 0xe3, 0x19, 0x19, 0xca, 0xcb, 0x3c, 0xbf, 0xf2, 0x5f, 0xc3, 0xfd, 0xf3,
- 0x2c, 0xbb, 0x2e, 0xf3, 0x33, 0x5a, 0x60, 0xc4, 0xb3, 0xe2, 0xf6, 0x79, 0xca, 0x8b, 0xdb, 0x00,
- 0x7f, 0x29, 0x91, 0x71, 0x72, 0x00, 0x6e, 0x5c, 0x39, 0x3c, 0x6b, 0x66, 0x1d, 0xb9, 0xc1, 0xda,
- 0x40, 0x08, 0xf4, 0xd3, 0x70, 0x85, 0x5e, 0x4f, 0x3a, 0xe4, 0xd9, 0x7f, 0x0e, 0x07, 0xdd, 0x80,
- 0x2c, 0xcf, 0x52, 0x86, 0xe4, 0x11, 0x38, 0x28, 0x0c, 0x12, 0x6d, 0x74, 0xfa, 0xc1, 0x71, 0x45,
- 0xe5, 0x58, 0xc5, 0x29, 0xaf, 0x7f, 0x0a, 0xe4, 0x9c, 0x32, 0x2e, 0x6c, 0x14, 0xd9, 0x3b, 0xd1,
- 0xf1, 0xbf, 0x83, 0x49, 0x23, 0x47, 0x57, 0x7c, 0x0c, 0xbb, 0xa8, 0x4c, 0x9e, 0x35, 0xb3, 0xbb,
- 0x6a, 0x56, 0x7e, 0xff, 0x6f, 0x0b, 0x1c, 0x69, 0xaa, 0x5b, 0xb3, 0xd6, 0xad, 0x91, 0x4f, 0x61,
- 0x4c, 0xd9, 0x7c, 0x4d, 0x40, 0xb4, 0x3d, 0x0c, 0x46, 0x94, 0xd5, 0xad, 0x92, 0x2f, 0x60, 0x10,
- 0xfd, 0x5c, 0xa6, 0xd7, 0xcc, 0xb3, 0x65, 0xa9, 0xc9, 0xba, 0xd4, 0xf7, 0x34, 0xc1, 0x67, 0xc2,
- 0x17, 0xe8, 0x10, 0xf2, 0x04, 0x20, 0xe4, 0xbc, 0xa0, 0x57, 0x25, 0x47, 0xe6, 0xf5, 0xe5, 0xf3,
- 0xf0, 0x8c, 0x84, 0x92, 0xe1, 0xd3, 0xda, 0x1f, 0x18, 0xb1, 0xfe, 0x02, 0xdc, 0x1a, 0x8e, 0x7c,
- 0x0c, 0xbb, 0x22, 0x67, 0x4e, 0x63, 0xcd, 0x76, 0x20, 0xae, 0x2f, 0x63, 0x72, 0x0f, 0x06, 0xd9,
- 0x62, 0xc1, 0x90, 0x4b, 0xa6, 0x76, 0xa0, 0x6f, 0xa2, 0x37, 0x46, 0x7f, 0x43, 0xcf, 0x9e, 0x59,
- 0x47, 0xfd, 0x40, 0x9e, 0xc9, 0x3e, 0x38, 0x2b, 0x4e, 0x57, 0x28, 0x69, 0xd8, 0x81, 0xba, 0xf8,
- 0x7f, 0xf6, 0xe0, 0x4e, 0x93, 0x06, 0xb9, 0x0f, 0xae, 0xac, 0x26, 0x11, 0x2c, 0x89, 0x20, 0xa7,
- 0xe9, 0xa2, 0x81, 0xd2, 0x33, 0x50, 0xea, 0x94, 0x55, 0x16, 0xab, 0xa2, 0x7b, 0x2a, 0xe5, 0x55,
- 0x16, 0x23, 0xf9, 0x10, 0xec, 0x92, 0xc6, 0xb2, 0xec, 0x5e, 0x20, 0x8e, 0xc2, 0xb2, 0xa4, 0xb1,
- 0xe7, 0x28, 0xcb, 0x92, 0xca, 0x46, 0xa2, 0x42, 0xe2, 0x0e, 0x54, 0x23, 0xea, 0x26, 0x1a, 0x59,
- 0x09, 0xeb, 0xae, 0x7a, 0x49, 0xe2, 0x4c, 0x66, 0x30, 0x2a, 0x30, 0x4f, 0x68, 0x14, 0x72, 0x9a,
- 0xa5, 0xde, 0x50, 0xba, 0x4c, 0x13, 0x79, 0x00, 0x10, 0x65, 0x49, 0x82, 0x91, 0x0c, 0x70, 0x65,
- 0x80, 0x61, 0x11, 0xcf, 0x93, 0xf3, 0x64, 0xce, 0x30, 0xf2, 0x60, 0x66, 0x1d, 0x39, 0xc1, 0x80,
- 0xf3, 0xe4, 0x02, 0x23, 0x7f, 0x09, 0x9f, 0xbc, 0x40, 0x39, 0x5e, 0xb7, 0xc6, 0x7b, 0xd1, 0xa3,
- 0xd9, 0x35, 0x30, 0x87, 0x00, 0x79, 0x58, 0x60, 0xca, 0xc5, 0xd0, 0xe8, 0x2d, 0x71, 0x95, 0xe5,
- 0x8c, 0x16, 0xe6, 0x8b, 0xb3, 0xcd, 0x17, 0xe7, 0xff, 0x6e, 0xc1, 0xb4, 0xab, 0x92, 0x1e, 0xe8,
- 0xe6, 0xdc, 0x58, 0xef, 0x3e, 0x37, 0xc6, 0x78, 0xf6, 0xde, 0x3a, 0x9e, 0xfe, 0x09, 0xdc, 0x7d,
- 0x81, 0x5c, 0xda, 0xb3, 0x94, 0x63, 0xca, 0xab, 0x56, 0xb7, 0x0d, 0x9c, 0x7f, 0x0a, 0xf7, 0x36,
- 0x33, 0x34, 0x65, 0x0f, 0x76, 0x23, 0x65, 0x92, 0x29, 0xe3, 0xa0, 0xba, 0xfa, 0x3f, 0x01, 0x79,
- 0x56, 0x60, 0xc8, 0xf1, 0x3d, 0x74, 0xa7, 0xd6, 0x90, 0xde, 0x1b, 0x35, 0xe4, 0x2e, 0x4c, 0x1a,
- 0xd0, 0x8a, 0x8b, 0xa8, 0x78, 0x99, 0xc7, 0xff, 0x57, 0xc5, 0x06, 0xb4, 0xae, 0xf8, 0x97, 0x05,
- 0xe4, 0x0c, 0x13, 0x7c, 0xaf, 0x92, 0x1d, 0xe2, 0xda, 0x52, 0x20, 0xbb, 0xad, 0x40, 0x0f, 0xe1,
- 0x8e, 0x08, 0x91, 0xd5, 0xe6, 0x71, 0xc8, 0x43, 0xb9, 0x5a, 0xc3, 0x60, 0x4c, 0x99, 0xa2, 0x70,
- 0x16, 0xf2, 0x50, 0x10, 0x6d, 0x10, 0xd2, 0x44, 0xff, 0xb0, 0x60, 0xf2, 0x94, 0x31, 0xba, 0x4c,
- 0x7f, 0xcc, 0x92, 0x72, 0x85, 0x15, 0xd3, 0x7d, 0x70, 0xa2, 0xac, 0xd4, 0x2f, 0xcf, 0x09, 0xd4,
- 0x65, 0x63, 0x91, 0x7a, 0xad, 0x45, 0xda, 0x58, 0x45, 0xbb, 0xbd, 0x8a, 0xc6, 0xaa, 0xf5, 0x1b,
- 0xab, 0x76, 0x03, 0xfb, 0x4d, 0x1e, 0x7a, 0x8e, 0xb6, 0x6a, 0x9d, 0x90, 0x91, 0x22, 0xd1, 0x24,
- 0xc4, 0x51, 0x2e, 0x5f, 0x79, 0x95, 0xd0, 0x68, 0x2e, 0x1c, 0xb6, 0x5e, 0x3e, 0x69, 0xb9, 0x2c,
- 0x92, 0x75, 0x4b, 0x7d, 0xa3, 0x25, 0xff, 0x2b, 0x98, 0xa8, 0xaf, 0x57, 0xb3, 0xff, 0x43, 0x80,
- 0x1b, 0x69, 0x98, 0xd3, 0x58, 0x7d, 0x45, 0xdc, 0xc0, 0x55, 0x96, 0x97, 0x31, 0xf3, 0xbf, 0x05,
- 0xf7, 0x3c, 0x53, 0x2d, 0x31, 0x72, 0x02, 0x6e, 0x52, 0x5d, 0xf4, 0x07, 0x87, 0xac, 0xc7, 0xa5,
- 0x8a, 0x0b, 0xd6, 0x41, 0xfe, 0x37, 0x30, 0xac, 0xcc, 0x55, 0x1f, 0xd6, 0xb6, 0x3e, 0x7a, 0x1b,
- 0x7d, 0xf8, 0xff, 0x5a, 0xb0, 0xdf, 0xa4, 0xac, 0x1f, 0xd5, 0x25, 0xec, 0xd5, 0x25, 0xe6, 0xab,
- 0x30, 0xd7, 0x5c, 0x4e, 0x4c, 0x2e, 0xed, 0xb4, 0x9a, 0x20, 0x7b, 0x15, 0xe6, 0x6a, 0x38, 0xc6,
- 0x89, 0x61, 0x9a, 0xfe, 0x00, 0x1f, 0xb5, 0x42, 0x04, 0xeb, 0x6b, 0xac, 0x66, 0x58, 0x1c, 0xc9,
- 0x63, 0x70, 0x6e, 0xc2, 0xa4, 0x44, 0xbd, 0x30, 0x93, 0xf6, 0x13, 0x60, 0x81, 0x8a, 0xf8, 0xba,
- 0xf7, 0xc4, 0x3a, 0xfd, 0xc7, 0x81, 0xf1, 0x05, 0x86, 0xbf, 0x22, 0xc6, 0x42, 0x3e, 0x0a, 0xb2,
- 0xac, 0xba, 0x6a, 0xfe, 0x46, 0x90, 0x47, 0x9b, 0xf4, 0x3b, 0xff, 0x5b, 0xa6, 0x9f, 0xbf, 0x2d,
- 0x4c, 0x0f, 0xfc, 0x0e, 0x39, 0x87, 0x91, 0xf1, 0xd3, 0x40, 0x0e, 0x8c, 0xc4, 0xd6, 0xff, 0xc7,
- 0xf4, 0x70, 0x8b, 0xb7, 0x46, 0x0b, 0x81, 0xb4, 0x85, 0x9b, 0x7c, 0xb6, 0x4e, 0xdb, 0xfa, 0x01,
- 0x99, 0x3e, 0x7c, 0x73, 0x90, 0x49, 0xd8, 0x50, 0x35, 0x93, 0x70, 0x5b, 0x47, 0x4d, 0xc2, 0x5d,
- 0x52, 0x28, 0xd1, 0x0c, 0xc5, 0x32, 0xd1, 0xda, 0x1a, 0x69, 0xa2, 0x75, 0xc9, 0x9c, 0x44, 0x33,
- 0x64, 0xc5, 0x44, 0x6b, 0xcb, 0x9f, 0x89, 0xd6, 0xa5, 0x45, 0x3b, 0xe4, 0x35, 0x8c, 0x4d, 0x11,
- 0x20, 0x46, 0x42, 0x87, 0x48, 0x4d, 0x1f, 0x6c, 0x73, 0x9b, 0x80, 0xe6, 0xcc, 0x9b, 0x80, 0x1d,
- 0x5b, 0x6f, 0x02, 0x76, 0xad, 0x8a, 0xbf, 0x73, 0x35, 0x90, 0xbf, 0xd3, 0x5f, 0xfe, 0x17, 0x00,
- 0x00, 0xff, 0xff, 0x00, 0xdc, 0x65, 0x5f, 0x5d, 0x0b, 0x00, 0x00,
+ // 1108 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0xdc, 0x44,
+ 0x14, 0x8f, 0xd7, 0xbb, 0x9b, 0xf8, 0xed, 0xa6, 0xb4, 0xb3, 0x69, 0x6b, 0xb6, 0x49, 0xd9, 0x0e,
+ 0x2d, 0x4a, 0x05, 0x8a, 0xa2, 0xc0, 0xa1, 0x80, 0x90, 0xa8, 0x92, 0xb6, 0xaa, 0x94, 0x52, 0xc9,
+ 0x21, 0x48, 0x88, 0xc3, 0xca, 0xb1, 0x67, 0xc3, 0x28, 0x5e, 0xdb, 0xcc, 0x8c, 0x03, 0xe1, 0xca,
+ 0x91, 0x13, 0x1f, 0x02, 0x71, 0xe7, 0x03, 0x70, 0xe1, 0x8b, 0xa1, 0xf9, 0x63, 0xef, 0x78, 0xed,
+ 0x4d, 0xdb, 0x03, 0xb7, 0x99, 0xf7, 0xde, 0xfc, 0xe6, 0xf7, 0xde, 0xbc, 0xf7, 0xf3, 0x2e, 0x0c,
+ 0x66, 0x34, 0x21, 0x6c, 0x2f, 0x67, 0x99, 0xc8, 0xd0, 0x86, 0xda, 0x4c, 0xf3, 0x33, 0xfc, 0x1a,
+ 0xee, 0x1d, 0x67, 0xd9, 0x45, 0x91, 0x1f, 0x51, 0x46, 0x22, 0x91, 0xb1, 0xab, 0x67, 0xa9, 0x60,
+ 0x57, 0x01, 0xf9, 0xa9, 0x20, 0x5c, 0xa0, 0x6d, 0xf0, 0xe2, 0xd2, 0xe1, 0x3b, 0x13, 0x67, 0xd7,
+ 0x0b, 0x16, 0x06, 0x84, 0xa0, 0x9b, 0x86, 0x73, 0xe2, 0x77, 0x94, 0x43, 0xad, 0xf1, 0x33, 0xd8,
+ 0x6e, 0x07, 0xe4, 0x79, 0x96, 0x72, 0x82, 0x1e, 0x41, 0x8f, 0x48, 0x83, 0x42, 0x1b, 0x1c, 0xbc,
+ 0xb7, 0x57, 0x52, 0xd9, 0xd3, 0x71, 0xda, 0x8b, 0xff, 0x71, 0x00, 0x1d, 0x53, 0x2e, 0xa4, 0x91,
+ 0x12, 0xfe, 0x76, 0x7c, 0xee, 0x40, 0x3f, 0x67, 0x64, 0x46, 0x7f, 0x31, 0x8c, 0xcc, 0x0e, 0x7d,
+ 0x02, 0xb7, 0xb8, 0x08, 0x99, 0x78, 0xce, 0xb2, 0xf9, 0x73, 0x9a, 0x90, 0x6f, 0x24, 0x69, 0x57,
+ 0x85, 0x34, 0x1d, 0x68, 0x0f, 0x10, 0x4d, 0xa3, 0xa4, 0xe0, 0xf4, 0x92, 0x9c, 0x94, 0x5e, 0xbf,
+ 0x3b, 0x71, 0x76, 0x37, 0x82, 0x16, 0x0f, 0xda, 0x82, 0x5e, 0x42, 0xe7, 0x54, 0xf8, 0xbd, 0x89,
+ 0xb3, 0xbb, 0x19, 0xe8, 0x0d, 0xfe, 0x1a, 0x46, 0x35, 0xfe, 0x26, 0xfd, 0xc7, 0xb0, 0x4e, 0xb4,
+ 0xc9, 0x77, 0x26, 0x6e, 0x5b, 0x01, 0x4a, 0x3f, 0xfe, 0xd3, 0x81, 0x9e, 0x32, 0x55, 0x75, 0x76,
+ 0x16, 0x75, 0x46, 0x0f, 0x60, 0x48, 0xf9, 0x74, 0x51, 0x8c, 0x8e, 0xe2, 0x37, 0xa0, 0xbc, 0xaa,
+ 0x3b, 0xfa, 0x18, 0xfa, 0xd1, 0x8f, 0x45, 0x7a, 0xc1, 0x7d, 0x57, 0x5d, 0x35, 0x5a, 0x5c, 0x25,
+ 0x93, 0x3d, 0x94, 0xbe, 0xc0, 0x84, 0xa0, 0x27, 0x00, 0xa1, 0x10, 0x8c, 0x9e, 0x15, 0x82, 0x70,
+ 0x95, 0xed, 0xe0, 0xc0, 0xb7, 0x0e, 0x14, 0x9c, 0x3c, 0xad, 0xfc, 0x81, 0x15, 0x8b, 0x67, 0xe0,
+ 0x55, 0x70, 0xe8, 0x2e, 0xac, 0xcb, 0x33, 0x53, 0x1a, 0x1b, 0xb6, 0x7d, 0xb9, 0x7d, 0x19, 0xcb,
+ 0xb7, 0xc9, 0x66, 0x33, 0x4e, 0x84, 0x62, 0xea, 0x06, 0x66, 0x27, 0x73, 0xe3, 0xf4, 0x57, 0xfd,
+ 0x1c, 0xdd, 0x40, 0xad, 0x65, 0x45, 0xe7, 0x82, 0xce, 0x89, 0xa2, 0xe1, 0x06, 0x7a, 0x83, 0x7f,
+ 0xef, 0xc0, 0x8d, 0x3a, 0x0d, 0x74, 0x0f, 0x3c, 0x75, 0x9b, 0x42, 0x70, 0x14, 0x82, 0x6a, 0xed,
+ 0x93, 0x1a, 0x4a, 0xc7, 0x42, 0xa9, 0x8e, 0xcc, 0xb3, 0x58, 0x5f, 0xba, 0xa9, 0x8f, 0xbc, 0xca,
+ 0x62, 0x82, 0x6e, 0x82, 0x5b, 0xd0, 0x58, 0x5d, 0xbb, 0x19, 0xc8, 0xa5, 0xb4, 0x9c, 0xd3, 0xd8,
+ 0x3c, 0xad, 0x5c, 0xca, 0x44, 0x22, 0xa6, 0x70, 0xfb, 0x3a, 0x11, 0xbd, 0x93, 0x89, 0xcc, 0xa5,
+ 0x75, 0x5d, 0x3f, 0x92, 0x5c, 0xa3, 0x09, 0x0c, 0x18, 0xc9, 0x13, 0x1a, 0x85, 0x82, 0x66, 0xa9,
+ 0xbf, 0xa1, 0x5c, 0xb6, 0x09, 0xdd, 0x07, 0x88, 0xb2, 0x24, 0x21, 0x91, 0x0a, 0xf0, 0x54, 0x80,
+ 0x65, 0x91, 0xf5, 0x14, 0x22, 0x99, 0x72, 0x12, 0xf9, 0x30, 0x71, 0x76, 0x7b, 0x41, 0x5f, 0x88,
+ 0xe4, 0x84, 0x44, 0xf8, 0x1c, 0xde, 0x7f, 0x41, 0x54, 0x7b, 0x5d, 0x59, 0xef, 0x62, 0xc6, 0xa4,
+ 0xad, 0x61, 0x76, 0x00, 0xf2, 0x90, 0x91, 0x54, 0xc8, 0xa6, 0x31, 0x03, 0xe2, 0x69, 0xcb, 0x11,
+ 0x65, 0xf6, 0xc3, 0xb9, 0xf6, 0xc3, 0xe1, 0xdf, 0x1c, 0x18, 0xb7, 0xdd, 0x64, 0x1a, 0xba, 0xde,
+ 0x37, 0xce, 0xdb, 0xf7, 0x8d, 0xd5, 0x9e, 0x9d, 0x37, 0xb6, 0x27, 0xde, 0x87, 0xdb, 0x2f, 0x88,
+ 0x50, 0xf6, 0x2c, 0x15, 0x24, 0x15, 0x65, 0xaa, 0xab, 0x1a, 0x0e, 0x1f, 0xc0, 0x9d, 0xe5, 0x13,
+ 0x86, 0xb2, 0x0f, 0xeb, 0x91, 0x36, 0xa9, 0x23, 0xc3, 0xa0, 0xdc, 0xe2, 0xef, 0x01, 0x1d, 0x32,
+ 0x12, 0x0a, 0xf2, 0x0e, 0x22, 0x58, 0x09, 0x5a, 0xe7, 0x5a, 0x41, 0xbb, 0x0d, 0xa3, 0x1a, 0xb4,
+ 0xe6, 0x22, 0x6f, 0x3c, 0xcd, 0xe3, 0xff, 0xeb, 0xc6, 0x1a, 0xb4, 0xb9, 0xf1, 0x6f, 0x07, 0xd0,
+ 0x11, 0x49, 0xc8, 0x3b, 0x5d, 0xd9, 0xa2, 0xf4, 0x0d, 0x05, 0x72, 0x9b, 0x0a, 0xf4, 0x10, 0x6e,
+ 0xc8, 0x10, 0x75, 0xdb, 0x34, 0x0e, 0x45, 0x68, 0x64, 0x74, 0x48, 0xb9, 0xa6, 0x70, 0x14, 0x8a,
+ 0xd0, 0x00, 0x31, 0x12, 0x15, 0x4c, 0x2a, 0xab, 0x1a, 0x36, 0x05, 0x14, 0x94, 0x26, 0x99, 0x4b,
+ 0x8d, 0xb3, 0xc9, 0xe5, 0x2f, 0x07, 0x46, 0x4f, 0x39, 0xa7, 0xe7, 0xe9, 0x77, 0x59, 0x52, 0xcc,
+ 0x49, 0x99, 0xcc, 0x16, 0xf4, 0xa2, 0xac, 0x30, 0xef, 0xdb, 0x0b, 0xf4, 0x66, 0x69, 0xd6, 0x3a,
+ 0x8d, 0x59, 0x5b, 0x9a, 0x56, 0xb7, 0x39, 0xad, 0xd6, 0x34, 0x76, 0xed, 0x69, 0x44, 0x1f, 0xc0,
+ 0x40, 0xa6, 0x37, 0x8d, 0x48, 0x2a, 0x08, 0x53, 0x19, 0x78, 0x01, 0x48, 0xd3, 0xa1, 0xb2, 0xe0,
+ 0x4b, 0xd8, 0xaa, 0x13, 0x35, 0xbd, 0xb8, 0x52, 0x2f, 0xa5, 0x14, 0xb1, 0xc4, 0xb0, 0x94, 0x4b,
+ 0x35, 0xc0, 0xc5, 0x59, 0x42, 0xa3, 0xa9, 0x74, 0xb8, 0x66, 0x80, 0x95, 0xe5, 0x94, 0x25, 0x8b,
+ 0x9c, 0xbb, 0x56, 0xce, 0xf8, 0x33, 0x18, 0xe9, 0xcf, 0x71, 0xbd, 0x40, 0x3b, 0x00, 0x97, 0xca,
+ 0x30, 0xa5, 0xb1, 0xfe, 0x12, 0x79, 0x81, 0xa7, 0x2d, 0x2f, 0x63, 0x8e, 0xbf, 0x02, 0xef, 0x38,
+ 0xd3, 0x39, 0x73, 0xb4, 0x0f, 0x5e, 0x52, 0x6e, 0xcc, 0x47, 0x0b, 0x2d, 0x5a, 0xae, 0x8c, 0x0b,
+ 0x16, 0x41, 0xf8, 0x4b, 0xd8, 0x28, 0xcd, 0x65, 0x1e, 0xce, 0xaa, 0x3c, 0x3a, 0x4b, 0x79, 0xe0,
+ 0x7f, 0x1d, 0xd8, 0xaa, 0x53, 0x36, 0xa5, 0x3a, 0x85, 0xcd, 0xea, 0x8a, 0xe9, 0x3c, 0xcc, 0x0d,
+ 0x97, 0x7d, 0x9b, 0x4b, 0xf3, 0x58, 0x45, 0x90, 0xbf, 0x0a, 0x73, 0xdd, 0x3d, 0xc3, 0xc4, 0x32,
+ 0x8d, 0xbf, 0x85, 0x5b, 0x8d, 0x10, 0xc9, 0xfa, 0x82, 0x94, 0x73, 0x20, 0x97, 0xe8, 0x31, 0xf4,
+ 0x2e, 0xc3, 0xa4, 0x20, 0x66, 0xe8, 0x46, 0xcd, 0x0a, 0xf0, 0x40, 0x47, 0x7c, 0xd1, 0x79, 0xe2,
+ 0xe0, 0xcf, 0xe1, 0xae, 0x6e, 0xd8, 0xc3, 0xaa, 0xbf, 0xca, 0xda, 0xd7, 0xdb, 0xd0, 0x59, 0x6e,
+ 0x43, 0x3c, 0x06, 0xbf, 0x79, 0x54, 0x27, 0x73, 0xf0, 0x47, 0x1f, 0x86, 0x27, 0x24, 0xfc, 0x99,
+ 0x90, 0x58, 0x2a, 0x1b, 0x43, 0xe7, 0x65, 0xb1, 0xea, 0x3f, 0xb7, 0xd0, 0xa3, 0xe5, 0xaa, 0xb4,
+ 0xfe, 0xbe, 0x1b, 0x7f, 0xf4, 0xa6, 0x30, 0x33, 0x68, 0x6b, 0xe8, 0x18, 0x06, 0xd6, 0xef, 0x19,
+ 0xb4, 0x6d, 0x1d, 0x6c, 0xfc, 0x4c, 0x1b, 0xef, 0xac, 0xf0, 0x56, 0x68, 0x21, 0xa0, 0xe6, 0x37,
+ 0x05, 0x7d, 0xb8, 0x38, 0xb6, 0xf2, 0xdb, 0x36, 0x7e, 0x78, 0x7d, 0x90, 0x4d, 0xd8, 0x12, 0x5c,
+ 0x9b, 0x70, 0x53, 0xe2, 0x6d, 0xc2, 0x6d, 0x2a, 0xad, 0xd0, 0x2c, 0x31, 0xb5, 0xd1, 0x9a, 0xf2,
+ 0x6d, 0xa3, 0xb5, 0x29, 0xb0, 0x42, 0xb3, 0xe4, 0xcc, 0x46, 0x6b, 0x2a, 0xb3, 0x8d, 0xd6, 0xa6,
+ 0x81, 0x6b, 0xe8, 0x35, 0x0c, 0x6d, 0x6d, 0x41, 0xd6, 0x81, 0x16, 0x71, 0x1c, 0xdf, 0x5f, 0xe5,
+ 0xb6, 0x01, 0xed, 0x51, 0xb2, 0x01, 0x5b, 0xc4, 0xc4, 0x06, 0x6c, 0x9b, 0x40, 0xbc, 0x86, 0x7e,
+ 0x80, 0x9b, 0xcb, 0x2d, 0x8d, 0x1e, 0x2c, 0xa7, 0xd5, 0x98, 0x94, 0x31, 0xbe, 0x2e, 0xa4, 0x04,
+ 0x3f, 0xeb, 0xab, 0xff, 0x34, 0x9f, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x41, 0x12, 0xff, 0xda,
+ 0xe2, 0x0c, 0x00, 0x00,
}
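
Note: the regenerated stubs above expose DeleteCollection on both SeaweedFilerClient and SeaweedFilerServer. A minimal client-side sketch, following the same dial/close pattern as withFilerClient in s3api_handlers.go below (the filer gRPC address "localhost:18888" is an assumption for illustration):

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	conn, err := util.GrpcDial("localhost:18888") // assumed filer gRPC address
	if err != nil {
		log.Fatalf("dial filer: %v", err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	// Deleting a collection drops the volumes backing that bucket's objects.
	if _, err = client.DeleteCollection(context.Background(), &filer_pb.DeleteCollectionRequest{
		Collection: "my-bucket",
	}); err != nil {
		log.Fatalf("delete collection: %v", err)
	}
}

DeleteBucketHandler in s3api_bucket_handlers.go below uses exactly this request type to drop a bucket's collection before removing its directory entry.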
diff --git a/weed/s3api/AmazonS3.xsd b/weed/s3api/AmazonS3.xsd
new file mode 100644
index 000000000..8016a6a83
--- /dev/null
+++ b/weed/s3api/AmazonS3.xsd
@@ -0,0 +1,692 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<xsd:schema
+ xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+ elementFormDefault="qualified"
+ targetNamespace="http://s3.amazonaws.com/doc/2006-03-01/">
+
+ <xsd:element name="CreateBucket">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="MetadataEntry">
+ <xsd:sequence>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="Value" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="CreateBucketResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="CreateBucketReturn" type="tns:CreateBucketResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="Status">
+ <xsd:sequence>
+ <xsd:element name="Code" type="xsd:int"/>
+ <xsd:element name="Description" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="Result">
+ <xsd:sequence>
+ <xsd:element name="Status" type="tns:Status"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="CreateBucketResult">
+ <xsd:sequence>
+ <xsd:element name="BucketName" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="DeleteBucket">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="DeleteBucketResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="DeleteBucketResponse" type="tns:Status"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="BucketLoggingStatus">
+ <xsd:sequence>
+ <xsd:element name="LoggingEnabled" type="tns:LoggingSettings" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="LoggingSettings">
+ <xsd:sequence>
+ <xsd:element name="TargetBucket" type="xsd:string"/>
+ <xsd:element name="TargetPrefix" type="xsd:string"/>
+ <xsd:element name="TargetGrants" type="tns:AccessControlList" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="GetBucketLoggingStatus">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetBucketLoggingStatusResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetBucketLoggingStatusResponse" type="tns:BucketLoggingStatus"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketLoggingStatus">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="BucketLoggingStatus" type="tns:BucketLoggingStatus"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketLoggingStatusResponse">
+ <xsd:complexType>
+ <xsd:sequence/>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetObjectAccessControlPolicyResponse" type="tns:AccessControlPolicy"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetBucketAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetBucketAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetBucketAccessControlPolicyResponse" type="tns:AccessControlPolicy"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType abstract="true" name="Grantee"/>
+
+ <xsd:complexType name="User" abstract="true">
+ <xsd:complexContent>
+ <xsd:extension base="tns:Grantee"/>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="AmazonCustomerByEmail">
+ <xsd:complexContent>
+ <xsd:extension base="tns:User">
+ <xsd:sequence>
+ <xsd:element name="EmailAddress" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="CanonicalUser">
+ <xsd:complexContent>
+ <xsd:extension base="tns:User">
+ <xsd:sequence>
+ <xsd:element name="ID" type="xsd:string"/>
+ <xsd:element name="DisplayName" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="Group">
+ <xsd:complexContent>
+ <xsd:extension base="tns:Grantee">
+ <xsd:sequence>
+ <xsd:element name="URI" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:simpleType name="Permission">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="READ"/>
+ <xsd:enumeration value="WRITE"/>
+ <xsd:enumeration value="READ_ACP"/>
+ <xsd:enumeration value="WRITE_ACP"/>
+ <xsd:enumeration value="FULL_CONTROL"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name="StorageClass">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="STANDARD"/>
+ <xsd:enumeration value="REDUCED_REDUNDANCY"/>
+ <xsd:enumeration value="GLACIER"/>
+ <xsd:enumeration value="UNKNOWN"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="Grant">
+ <xsd:sequence>
+ <xsd:element name="Grantee" type="tns:Grantee"/>
+ <xsd:element name="Permission" type="tns:Permission"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="AccessControlList">
+ <xsd:sequence>
+ <xsd:element name="Grant" type="tns:Grant" minOccurs="0" maxOccurs="100"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="CreateBucketConfiguration">
+ <xsd:sequence>
+ <xsd:element name="LocationConstraint" type="tns:LocationConstraint"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="LocationConstraint">
+ <xsd:simpleContent>
+ <xsd:extension base="xsd:string"/>
+ </xsd:simpleContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="AccessControlPolicy">
+ <xsd:sequence>
+ <xsd:element name="Owner" type="tns:CanonicalUser"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="SetObjectAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetObjectAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence/>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence/>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="GetMetadata" type="xsd:boolean"/>
+ <xsd:element name="GetData" type="xsd:boolean"/>
+ <xsd:element name="InlineData" type="xsd:boolean"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetObjectResponse" type="tns:GetObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="GetObjectResult">
+ <xsd:complexContent>
+ <xsd:extension base="tns:Result">
+ <xsd:sequence>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="Data" type="xsd:base64Binary" nillable="true"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:element name="GetObjectExtended">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="GetMetadata" type="xsd:boolean"/>
+ <xsd:element name="GetData" type="xsd:boolean"/>
+ <xsd:element name="InlineData" type="xsd:boolean"/>
+ <xsd:element name="ByteRangeStart" type="xsd:long" minOccurs="0"/>
+ <xsd:element name="ByteRangeEnd" type="xsd:long" minOccurs="0"/>
+ <xsd:element name="IfModifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="IfUnmodifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="IfMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="IfNoneMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="ReturnCompleteObjectOnConditionFailure" type="xsd:boolean" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectExtendedResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetObjectResponse" type="tns:GetObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="PutObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="ContentLength" type="xsd:long"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="PutObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="PutObjectResponse" type="tns:PutObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="PutObjectResult">
+ <xsd:sequence>
+ <xsd:element name="ETag" type="xsd:string"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="PutObjectInline">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element minOccurs="0" maxOccurs="100" name="Metadata" type="tns:MetadataEntry"/>
+ <xsd:element name="Data" type="xsd:base64Binary"/>
+ <xsd:element name="ContentLength" type="xsd:long"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="PutObjectInlineResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="PutObjectInlineResponse" type="tns:PutObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="DeleteObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="DeleteObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="DeleteObjectResponse" type="tns:Status"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListBucket">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Prefix" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Marker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="MaxKeys" type="xsd:int" minOccurs="0"/>
+ <xsd:element name="Delimiter" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListBucketResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="ListBucketResponse" type="tns:ListBucketResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListVersionsResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="ListVersionsResponse" type="tns:ListVersionsResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="ListEntry">
+ <xsd:sequence>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ <xsd:element name="Size" type="xsd:long"/>
+ <xsd:element name="Owner" type="tns:CanonicalUser" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="VersionEntry">
+ <xsd:sequence>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="VersionId" type="xsd:string"/>
+ <xsd:element name="IsLatest" type="xsd:boolean"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ <xsd:element name="Size" type="xsd:long"/>
+ <xsd:element name="Owner" type="tns:CanonicalUser" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="DeleteMarkerEntry">
+ <xsd:sequence>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="VersionId" type="xsd:string"/>
+ <xsd:element name="IsLatest" type="xsd:boolean"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="Owner" type="tns:CanonicalUser" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="PrefixEntry">
+ <xsd:sequence>
+ <xsd:element name="Prefix" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListBucketResult">
+ <xsd:sequence>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="Prefix" type="xsd:string"/>
+ <xsd:element name="Marker" type="xsd:string"/>
+ <xsd:element name="NextMarker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="MaxKeys" type="xsd:int"/>
+ <xsd:element name="Delimiter" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="IsTruncated" type="xsd:boolean"/>
+ <xsd:element name="Contents" type="tns:ListEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="CommonPrefixes" type="tns:PrefixEntry" minOccurs="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListVersionsResult">
+ <xsd:sequence>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="Prefix" type="xsd:string"/>
+ <xsd:element name="KeyMarker" type="xsd:string"/>
+ <xsd:element name="VersionIdMarker" type="xsd:string"/>
+ <xsd:element name="NextKeyMarker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="NextVersionIdMarker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="MaxKeys" type="xsd:int"/>
+ <xsd:element name="Delimiter" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="IsTruncated" type="xsd:boolean"/>
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Version" type="tns:VersionEntry"/>
+ <xsd:element name="DeleteMarker" type="tns:DeleteMarkerEntry"/>
+ </xsd:choice>
+ <xsd:element name="CommonPrefixes" type="tns:PrefixEntry" minOccurs="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="ListAllMyBuckets">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListAllMyBucketsResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="ListAllMyBucketsResponse" type="tns:ListAllMyBucketsResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="ListAllMyBucketsEntry">
+ <xsd:sequence>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="CreationDate" type="xsd:dateTime"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListAllMyBucketsResult">
+ <xsd:sequence>
+ <xsd:element name="Owner" type="tns:CanonicalUser"/>
+ <xsd:element name="Buckets" type="tns:ListAllMyBucketsList"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListAllMyBucketsList">
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="tns:ListAllMyBucketsEntry" minOccurs="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="PostResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Location" type="xsd:anyURI"/>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:simpleType name="MetadataDirective">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="COPY"/>
+ <xsd:enumeration value="REPLACE"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:element name="CopyObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="SourceBucket" type="xsd:string"/>
+ <xsd:element name="SourceKey" type="xsd:string"/>
+ <xsd:element name="DestinationBucket" type="xsd:string"/>
+ <xsd:element name="DestinationKey" type="xsd:string"/>
+ <xsd:element name="MetadataDirective" type="tns:MetadataDirective" minOccurs="0"/>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="CopySourceIfModifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="CopySourceIfUnmodifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="CopySourceIfMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="CopySourceIfNoneMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="CopyObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="CopyObjectResult" type="tns:CopyObjectResult" />
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="CopyObjectResult">
+ <xsd:sequence>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="RequestPaymentConfiguration">
+ <xsd:sequence>
+ <xsd:element name="Payer" type="tns:Payer" minOccurs="1" maxOccurs="1"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:simpleType name="Payer">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="BucketOwner"/>
+ <xsd:enumeration value="Requester"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="VersioningConfiguration">
+ <xsd:sequence>
+ <xsd:element name="Status" type="tns:VersioningStatus" minOccurs="0"/>
+ <xsd:element name="MfaDelete" type="tns:MfaDeleteStatus" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:simpleType name="MfaDeleteStatus">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="Enabled"/>
+ <xsd:enumeration value="Disabled"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name="VersioningStatus">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="Enabled"/>
+ <xsd:enumeration value="Suspended"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="NotificationConfiguration">
+ <xsd:sequence>
+ <xsd:element name="TopicConfiguration" minOccurs="0" maxOccurs="unbounded" type="tns:TopicConfiguration"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="TopicConfiguration">
+ <xsd:sequence>
+ <xsd:element name="Topic" minOccurs="1" maxOccurs="1" type="xsd:string"/>
+ <xsd:element name="Event" minOccurs="1" maxOccurs="unbounded" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+</xsd:schema> \ No newline at end of file
diff --git a/weed/s3api/README.txt b/weed/s3api/README.txt
new file mode 100644
index 000000000..10a18ff4d
--- /dev/null
+++ b/weed/s3api/README.txt
@@ -0,0 +1,7 @@
+see https://blog.aqwari.net/xml-schema-go/
+
+1. go get aqwari.net/xml/cmd/xsdgen
+2. xsdgen -o s3api_xsd_generated.go -pkg s3api AmazonS3.xsd
+
+
+
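
Note: xsdgen turns each complexType in AmazonS3.xsd into a plain Go struct in s3api_xsd_generated.go. A hand-written approximation of two of the generated types (field names follow the schema; the xml struct tags and exact shapes here are assumptions, the generated file is authoritative):

type ListAllMyBucketsEntry struct {
	Name         string    `xml:"Name"`
	CreationDate time.Time `xml:"CreationDate"`
}

type ListAllMyBucketsResult struct {
	Owner   CanonicalUser        `xml:"Owner"`
	Buckets ListAllMyBucketsList `xml:"Buckets"`
}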
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
new file mode 100644
index 000000000..af05d0a93
--- /dev/null
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -0,0 +1,177 @@
+package s3api
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/gorilla/mux"
+ "net/http"
+ "os"
+ "time"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
+)
+
+func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+
+ var response ListAllMyBucketsResponse
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.ListEntriesRequest{
+ Directory: s3a.option.BucketsPath,
+ }
+
+ glog.V(4).Infof("read directory: %v", request)
+ resp, err := client.ListEntries(context.Background(), request)
+ if err != nil {
+ return fmt.Errorf("list buckets: %v", err)
+ }
+
+ var buckets []ListAllMyBucketsEntry
+ for _, entry := range resp.Entries {
+ if entry.IsDirectory {
+ buckets = append(buckets, ListAllMyBucketsEntry{
+ Name: entry.Name,
+ CreationDate: time.Unix(entry.Attributes.Crtime, 0),
+ })
+ }
+ }
+
+ response = ListAllMyBucketsResponse{
+ ListAllMyBucketsResponse: ListAllMyBucketsResult{
+ Owner: CanonicalUser{
+ ID: "",
+ DisplayName: "",
+ },
+ Buckets: ListAllMyBucketsList{
+ Bucket: buckets,
+ },
+ },
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+}
+
+func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.CreateEntryRequest{
+ Directory: s3a.option.BucketsPath,
+ Entry: &filer_pb.Entry{
+ Name: bucket,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ },
+ }
+
+ glog.V(1).Infof("create bucket: %v", request)
+ if _, err := client.CreateEntry(context.Background(), request); err != nil {
+ return fmt.Errorf("mkdir %s/%s: %v", s3a.option.BucketsPath, bucket, err)
+ }
+
+		// the collection itself is created lazily, when the first object is written into the bucket
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseEmpty(w)
+}
+
+func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ ctx := context.Background()
+
+ // delete collection
+ deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
+ Collection: bucket,
+ }
+
+ glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
+ if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil {
+ return fmt.Errorf("delete collection %s: %v", bucket, err)
+ }
+
+ // delete bucket metadata
+ request := &filer_pb.DeleteEntryRequest{
+ Directory: s3a.option.BucketsPath,
+ Name: bucket,
+ IsDirectory: true,
+ IsDeleteData: false,
+ IsRecursive: true,
+ }
+
+ glog.V(1).Infof("delete bucket: %v", request)
+ if _, err := client.DeleteEntry(ctx, request); err != nil {
+ return fmt.Errorf("delete bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeResponse(w, http.StatusNoContent, nil, mimeNone)
+}
+
+func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Directory: s3a.option.BucketsPath,
+ Name: bucket,
+ }
+
+ glog.V(1).Infof("lookup bucket: %v", request)
+ if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil {
+ return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrNoSuchBucket, r.URL)
+ return
+ }
+
+ writeSuccessResponseEmpty(w)
+}
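
Note: a self-contained sketch of the XML body ListBucketsHandler produces, built from the generated types and encodeResponse in s3api_handlers.go (fragment inside package s3api; the bucket name and timestamp are illustrative):

resp := ListAllMyBucketsResponse{
	ListAllMyBucketsResponse: ListAllMyBucketsResult{
		Owner: CanonicalUser{ID: "", DisplayName: ""},
		Buckets: ListAllMyBucketsList{
			Bucket: []ListAllMyBucketsEntry{
				{Name: "my-bucket", CreationDate: time.Unix(0, 0)},
			},
		},
	},
}
// Prints the xml.Header declaration followed by <ListAllMyBucketsResponse>...
fmt.Printf("%s\n", encodeResponse(resp))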
diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go
new file mode 100644
index 000000000..e5ce8df5a
--- /dev/null
+++ b/weed/s3api/s3api_errors.go
@@ -0,0 +1,100 @@
+package s3api
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
+// APIError structure
+type APIError struct {
+ Code string
+ Description string
+ HTTPStatusCode int
+}
+
+// RESTErrorResponse - error response format
+type RESTErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string `xml:"Code" json:"Code"`
+ Message string `xml:"Message" json:"Message"`
+ Resource string `xml:"Resource" json:"Resource"`
+ RequestID string `xml:"RequestId" json:"RequestId"`
+}
+
+// ErrorCode type of error status.
+type ErrorCode int
+
+// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+const (
+ ErrNone ErrorCode = iota
+ ErrMethodNotAllowed
+ ErrBucketNotEmpty
+ ErrBucketAlreadyExists
+ ErrBucketAlreadyOwnedByYou
+ ErrNoSuchBucket
+ ErrInvalidBucketName
+ ErrInvalidDigest
+ ErrInvalidMaxKeys
+ ErrInternalError
+ ErrNotImplemented
+)
+
+// errorCodeResponse maps each error code to its APIError structure; these
+// entries carry the respective descriptions for all the error responses.
+var errorCodeResponse = map[ErrorCode]APIError{
+ ErrMethodNotAllowed: {
+ Code: "MethodNotAllowed",
+ Description: "The specified method is not allowed against this resource.",
+ HTTPStatusCode: http.StatusMethodNotAllowed,
+ },
+ ErrBucketNotEmpty: {
+ Code: "BucketNotEmpty",
+ Description: "The bucket you tried to delete is not empty",
+ HTTPStatusCode: http.StatusConflict,
+ },
+ ErrBucketAlreadyExists: {
+ Code: "BucketAlreadyExists",
+ Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.",
+ HTTPStatusCode: http.StatusConflict,
+ },
+ ErrBucketAlreadyOwnedByYou: {
+ Code: "BucketAlreadyOwnedByYou",
+ Description: "Your previous request to create the named bucket succeeded and you already own it.",
+ HTTPStatusCode: http.StatusConflict,
+ },
+ ErrInvalidBucketName: {
+ Code: "InvalidBucketName",
+ Description: "The specified bucket is not valid.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidDigest: {
+ Code: "InvalidDigest",
+ Description: "The Content-Md5 you specified is not valid.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidMaxKeys: {
+ Code: "InvalidArgument",
+ Description: "Argument maxKeys must be an integer between 0 and 2147483647",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrNoSuchBucket: {
+ Code: "NoSuchBucket",
+ Description: "The specified bucket does not exist",
+ HTTPStatusCode: http.StatusNotFound,
+ },
+ ErrInternalError: {
+ Code: "InternalError",
+ Description: "We encountered an internal error, please try again.",
+ HTTPStatusCode: http.StatusInternalServerError,
+ },
+ ErrNotImplemented: {
+ Code: "NotImplemented",
+ Description: "A header you provided implies functionality that is not implemented",
+ HTTPStatusCode: http.StatusNotImplemented,
+ },
+}
+
+// getAPIError provides API Error for input API error code.
+func getAPIError(code ErrorCode) APIError {
+ return errorCodeResponse[code]
+}
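
Note: a short sketch of how an ErrorCode from the table above becomes the wire-format error body, via getAPIError here and getRESTErrorResponse/writeResponse in s3api_handlers.go (fragment inside package s3api):

apiErr := getAPIError(ErrNoSuchBucket)
errBody := getRESTErrorResponse(apiErr, "/my-bucket")
// apiErr.HTTPStatusCode == 404, errBody.Code == "NoSuchBucket"
fmt.Println(apiErr.HTTPStatusCode, errBody.Code)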
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go
new file mode 100644
index 000000000..ab1fc7038
--- /dev/null
+++ b/weed/s3api/s3api_handlers.go
@@ -0,0 +1,100 @@
+package s3api
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+type mimeType string
+
+const (
+ mimeNone mimeType = ""
+ mimeJSON mimeType = "application/json"
+ mimeXML mimeType = "application/xml"
+)
+
+func setCommonHeaders(w http.ResponseWriter) {
+ w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
+ w.Header().Set("Accept-Ranges", "bytes")
+}
+
+// encodeResponse encodes the response into XML format, prefixed with the standard XML header.
+func encodeResponse(response interface{}) []byte {
+ var bytesBuffer bytes.Buffer
+ bytesBuffer.WriteString(xml.Header)
+ e := xml.NewEncoder(&bytesBuffer)
+ e.Encode(response)
+ return bytesBuffer.Bytes()
+}
+
+func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress)
+ if err != nil {
+ return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err)
+ }
+ defer grpcConnection.Close()
+
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+
+ return fn(client)
+}
+
+// notFoundHandler responds with MethodNotAllowed when none of the HTTP routes match.
+func notFoundHandler(w http.ResponseWriter, r *http.Request) {
+ writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
+}
+
+func writeErrorResponse(w http.ResponseWriter, errorCode ErrorCode, reqURL *url.URL) {
+ apiError := getAPIError(errorCode)
+ errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
+ encodedErrorResponse := encodeResponse(errorResponse)
+ writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
+}
+
+func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse {
+ return RESTErrorResponse{
+ Code: err.Code,
+ Message: err.Description,
+ Resource: resource,
+ RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
+ }
+}
+
+func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
+ setCommonHeaders(w)
+ if mType != mimeNone {
+ w.Header().Set("Content-Type", string(mType))
+ }
+ w.WriteHeader(statusCode)
+ if response != nil {
+ w.Write(response)
+ w.(http.Flusher).Flush()
+ }
+}
+
+func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
+ writeResponse(w, http.StatusOK, response, mimeXML)
+}
+
+func writeSuccessResponseEmpty(w http.ResponseWriter) {
+ writeResponse(w, http.StatusOK, nil, mimeNone)
+}
+
+func validateContentMd5(h http.Header) ([]byte, error) {
+ md5B64, ok := h["Content-Md5"]
+ if ok {
+ if md5B64[0] == "" {
+ return nil, fmt.Errorf("Content-Md5 header set to empty value")
+ }
+ return base64.StdEncoding.DecodeString(md5B64[0])
+ }
+ return []byte{}, nil
+}
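
Note: validateContentMd5 returns the decoded digest when the Content-Md5 header carries valid base64, an empty slice when the header is absent, and an error when the value is empty or malformed. A usage sketch (fragment inside package s3api; also imports crypto/md5 and encoding/base64):

sum := md5.Sum([]byte("hello"))
h := http.Header{}
h.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))
digest, err := validateContentMd5(h)
// digest equals sum[:] and err is nil; an empty header value would return an error
fmt.Println(digest, err)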
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
new file mode 100644
index 000000000..a9be99d2f
--- /dev/null
+++ b/weed/s3api/s3api_object_handlers.go
@@ -0,0 +1,163 @@
+package s3api
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/gorilla/mux"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+var (
+ client *http.Client
+)
+
+func init() {
+ client = &http.Client{Transport: &http.Transport{
+ MaxIdleConnsPerHost: 1024,
+ }}
+}
+
+type UploadResult struct {
+ Name string `json:"name,omitempty"`
+ Size uint32 `json:"size,omitempty"`
+ Error string `json:"error,omitempty"`
+}
+
+func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+ object := vars["object"]
+
+ _, err := validateContentMd5(r.Header)
+ if err != nil {
+ writeErrorResponse(w, ErrInvalidDigest, r.URL)
+ return
+ }
+
+ uploadUrl := fmt.Sprintf("http://%s%s/%s/%s?collection=%s",
+ s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket)
+ proxyReq, err := http.NewRequest("PUT", uploadUrl, r.Body)
+
+ if err != nil {
+ glog.Errorf("NewRequest %s: %v", uploadUrl, err)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ proxyReq.Header.Set("Host", s3a.option.Filer)
+ proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+
+ for header, values := range r.Header {
+ for _, value := range values {
+ proxyReq.Header.Add(header, value)
+ }
+ }
+
+ resp, postErr := client.Do(proxyReq)
+
+ if postErr != nil {
+ glog.Errorf("post to filer: %v", postErr)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ defer resp.Body.Close()
+
+	respBody, readErr := ioutil.ReadAll(resp.Body)
+	if readErr != nil {
+		glog.Errorf("upload to filer response read: %v", readErr)
+		writeErrorResponse(w, ErrInternalError, r.URL)
+		return
+	}
+	var ret UploadResult
+	unmarshalErr := json.Unmarshal(respBody, &ret)
+	if unmarshalErr != nil {
+		glog.Errorf("failed to parse upload response from %s: %s", uploadUrl, string(respBody))
+		writeErrorResponse(w, ErrInternalError, r.URL)
+		return
+	}
+ if ret.Error != "" {
+ glog.Errorf("upload to filer error: %v", ret.Error)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseEmpty(w)
+}
+
+func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ destUrl := fmt.Sprintf("http://%s%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, r.RequestURI)
+
+	s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
+
+}
+
+func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ destUrl := fmt.Sprintf("http://%s%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, r.RequestURI)
+
+	s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
+
+}
+
+func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ destUrl := fmt.Sprintf("http://%s%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, r.RequestURI)
+
+	s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) {
+		for k, v := range proxyResponse.Header {
+ w.Header()[k] = v
+ }
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+}
+
+func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
+
+ glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
+
+ proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)
+
+ if err != nil {
+ glog.Errorf("NewRequest %s: %v", destUrl, err)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ proxyReq.Header.Set("Host", s3a.option.Filer)
+ proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+
+ for header, values := range r.Header {
+ for _, value := range values {
+ proxyReq.Header.Add(header, value)
+ }
+ }
+
+	resp, proxyErr := client.Do(proxyReq)
+
+	if proxyErr != nil {
+		glog.Errorf("proxy to filer: %v", proxyErr)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ defer resp.Body.Close()
+
+ responseFn(resp, w)
+}
+func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
+	for k, v := range proxyResponse.Header {
+		w.Header()[k] = v
+	}
+	w.WriteHeader(proxyResponse.StatusCode)
+	io.Copy(w, proxyResponse.Body)
+}
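
Note: passThroughResponse copies status, headers, and body verbatim, which is what GetObjectHandler and HeadObjectHandler need; DeleteObjectHandler instead overrides the status with 204 in its inline responseFn. A quick sketch with net/http/httptest (fragment inside package s3api):

rec := httptest.NewRecorder()
upstream := &http.Response{
	StatusCode: http.StatusOK,
	Header:     http.Header{"Content-Type": []string{"text/plain"}},
	Body:       ioutil.NopCloser(strings.NewReader("hello")),
}
passThroughResponse(upstream, rec)
// rec.Code == 200, rec.Body.String() == "hello", Content-Type is preserved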
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
new file mode 100644
index 000000000..4714fdf14
--- /dev/null
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -0,0 +1,179 @@
+package s3api
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/gorilla/mux"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strconv"
+ "time"
+)
+
+const (
+ maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse.
+)
+
+func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
+
+ // collect parameters
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ originalPrefix, marker, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
+
+ if maxKeys < 0 {
+ writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
+ return
+ }
+ if delimiter != "" && delimiter != "/" {
+ writeErrorResponse(w, ErrNotImplemented, r.URL)
+ return
+ }
+
+ if marker == "" {
+ marker = startAfter
+ }
+
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+}
+
+func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
+
+ // collect parameters
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
+
+ if maxKeys < 0 {
+ writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
+ return
+ }
+ if delimiter != "" && delimiter != "/" {
+ writeErrorResponse(w, ErrNotImplemented, r.URL)
+ return
+ }
+
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+}
+
+func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResponse, err error) {
+
+ // convert full path prefix into directory name and prefix for entry name
+ dir, prefix := filepath.Split(originalPrefix)
+
+ // check filer
+ err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.ListEntriesRequest{
+ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir),
+ Prefix: prefix,
+ Limit: uint32(maxKeys + 1),
+ StartFromFileName: marker,
+ InclusiveStartFrom: false,
+ }
+
+ glog.V(4).Infof("read directory: %v", request)
+ resp, err := client.ListEntries(context.Background(), request)
+ if err != nil {
+			return fmt.Errorf("list entries: %v", err)
+ }
+
+ var contents []ListEntry
+ var commonPrefixes []PrefixEntry
+ var counter int
+ var lastEntryName string
+ var isTruncated bool
+ for _, entry := range resp.Entries {
+ counter++
+ if counter > maxKeys {
+ isTruncated = true
+ break
+ }
+ lastEntryName = entry.Name
+ if entry.IsDirectory {
+ commonPrefixes = append(commonPrefixes, PrefixEntry{
+ Prefix: fmt.Sprintf("%s%s/", dir, entry.Name),
+ })
+ } else {
+ contents = append(contents, ListEntry{
+ Key: fmt.Sprintf("%s%s", dir, entry.Name),
+ LastModified: time.Unix(entry.Attributes.Mtime, 0),
+ ETag: "", // TODO add etag
+ Size: int64(filer2.TotalSize(entry.Chunks)),
+ Owner: CanonicalUser{
+ ID: fmt.Sprintf("%d", entry.Attributes.Uid),
+ },
+ StorageClass: StorageClass("STANDARD"),
+ })
+ }
+ }
+
+ response = ListBucketResponse{
+ ListBucketResponse: ListBucketResult{
+ Name: bucket,
+ Prefix: originalPrefix,
+ Marker: marker,
+ NextMarker: lastEntryName,
+ MaxKeys: maxKeys,
+ Delimiter: "/",
+ IsTruncated: isTruncated,
+ Contents: contents,
+ CommonPrefixes: commonPrefixes,
+ },
+ }
+
+ return nil
+ })
+
+ return
+}
+
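+// getListObjectsV2Args pulls the ListObjectsV2 parameters out of the
+// query string, defaulting max-keys to maxObjectListSizeLimit.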
+func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) {
+ prefix = values.Get("prefix")
+ token = values.Get("continuation-token")
+ startAfter = values.Get("start-after")
+ delimiter = values.Get("delimiter")
+ if values.Get("max-keys") != "" {
+ maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
+ } else {
+ maxkeys = maxObjectListSizeLimit
+ }
+ fetchOwner = values.Get("fetch-owner") == "true"
+ return
+}
+
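+// getListObjectsV1Args pulls the legacy ListObjects parameters out of
+// the query string, defaulting max-keys to maxObjectListSizeLimit.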
+func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) {
+ prefix = values.Get("prefix")
+ marker = values.Get("marker")
+ delimiter = values.Get("delimiter")
+ if values.Get("max-keys") != "" {
+ maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
+ } else {
+ maxkeys = maxObjectListSizeLimit
+ }
+ return
+}
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
new file mode 100644
index 000000000..b33c6f07b
--- /dev/null
+++ b/weed/s3api/s3api_server.go
@@ -0,0 +1,115 @@
+package s3api
+
+import (
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
+ "github.com/gorilla/mux"
+ "net/http"
+)
+
+type S3ApiServerOption struct {
+ Filer string
+ FilerGrpcAddress string
+ DomainName string
+ BucketsPath string
+}
+
+type S3ApiServer struct {
+ option *S3ApiServerOption
+}
+
+func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) {
+ s3ApiServer = &S3ApiServer{
+ option: option,
+ }
+
+ s3ApiServer.registerRouter(router)
+
+ return s3ApiServer, nil
+}
+
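+// registerRouter wires up the S3 REST routes. Object-level routes are
+// registered before bucket-level ones so the more specific patterns
+// match first, and ListObjectsV2 is selected by its list-type=2 query
+// parameter before falling back to the V1 handler.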
+func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
+ // API Router
+ apiRouter := router.PathPrefix("/").Subrouter()
+ var routers []*mux.Router
+ if s3a.option.DomainName != "" {
+ routers = append(routers, apiRouter.Host("{bucket:.+}."+s3a.option.DomainName).Subrouter())
+ }
+ routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
+
+ for _, bucket := range routers {
+
+ // PutObject
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler)
+ // PutBucket
+ bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler)
+
+ // HeadObject
+ bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler)
+ // HeadBucket
+ bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler)
+
+ // DeleteObject
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler)
+ // DeleteBucket
+ bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler)
+
+ // GetObject
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler)
+ // ListObjectsV2
+ bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2")
+ // ListObjectsV1 (Legacy)
+ bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler)
+
+ /*
+ // CopyObject
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler)
+
+ // CopyObjectPart
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ // PutObjectPart
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ // ListObjectParts
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
+ // CompleteMultipartUpload
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ // NewMultipartUpload
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "")
+ // AbortMultipartUpload
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+
+ // ListMultipartUploads
+ bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "")
+ // DeleteMultipleObjects
+ bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "")
+
+ // not implemented
+ // GetBucketLocation
+ bucket.Methods("GET").HandlerFunc(s3a.GetBucketLocationHandler).Queries("location", "")
+ // GetBucketPolicy
+ bucket.Methods("GET").HandlerFunc(s3a.GetBucketPolicyHandler).Queries("policy", "")
+ // GetObjectACL
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectACLHandler).Queries("acl", "")
+ // GetBucketACL
+ bucket.Methods("GET").HandlerFunc(s3a.GetBucketACLHandler).Queries("acl", "")
+ // PutBucketPolicy
+ bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
+ // DeleteBucketPolicy
+ bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "")
+ // PostPolicy
+ bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(s3a.PostPolicyBucketHandler)
+ */
+
+ }
+
+ // ListBuckets
+ apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler)
+
+ // NotFound
+ apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
+
+}
diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go
new file mode 100644
index 000000000..915b74ec4
--- /dev/null
+++ b/weed/s3api/s3api_xsd_generated.go
@@ -0,0 +1,1002 @@
+package s3api
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "time"
+)
+
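+// The types below mirror the AmazonS3.xsd schema; the MarshalXML and
+// UnmarshalXML overlays adapt time.Time and []byte fields to the
+// xsd:dateTime and xsd:base64Binary wire formats.
+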
+type AccessControlList struct {
+ Grant []Grant `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grant,omitempty"`
+}
+
+type AccessControlPolicy struct {
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
+}
+
+type AmazonCustomerByEmail struct {
+ EmailAddress string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ EmailAddress"`
+}
+
+type BucketLoggingStatus struct {
+ LoggingEnabled LoggingSettings `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LoggingEnabled,omitempty"`
+}
+
+type CanonicalUser struct {
+ ID string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ID"`
+ DisplayName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DisplayName,omitempty"`
+}
+
+type CopyObject struct {
+ SourceBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceBucket"`
+ SourceKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceKey"`
+ DestinationBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationBucket"`
+ DestinationKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationKey"`
+ MetadataDirective MetadataDirective `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MetadataDirective,omitempty"`
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ CopySourceIfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
+ CopySourceIfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfMatch,omitempty"`
+ CopySourceIfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfNoneMatch,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *CopyObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T CopyObject
+ var layout struct {
+ *T
+ CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.CopySourceIfModifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfModifiedSince)
+ layout.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfUnmodifiedSince)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *CopyObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T CopyObject
+ var overlay struct {
+ *T
+ CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.CopySourceIfModifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfModifiedSince)
+ overlay.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfUnmodifiedSince)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type CopyObjectResponse struct {
+ CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"`
+}
+
+type CopyObjectResult struct {
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+}
+
+func (t *CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T CopyObjectResult
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+func (t *CopyObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T CopyObjectResult
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type CreateBucket struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+}
+
+func (t *CreateBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T CreateBucket
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *CreateBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T CreateBucket
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type CreateBucketConfiguration struct {
+ LocationConstraint string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
+}
+
+type CreateBucketResponse struct {
+ CreateBucketReturn CreateBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketReturn"`
+}
+
+type CreateBucketResult struct {
+ BucketName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketName"`
+}
+
+type DeleteBucket struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *DeleteBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T DeleteBucket
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *DeleteBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T DeleteBucket
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type DeleteBucketResponse struct {
+ DeleteBucketResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteBucketResponse"`
+}
+
+type DeleteMarkerEntry struct {
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"`
+ IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
+}
+
+func (t *DeleteMarkerEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T DeleteMarkerEntry
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+func (t *DeleteMarkerEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T DeleteMarkerEntry
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type DeleteObject struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *DeleteObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T DeleteObject
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *DeleteObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T DeleteObject
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type DeleteObjectResponse struct {
+ DeleteObjectResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteObjectResponse"`
+}
+
+type GetBucketAccessControlPolicy struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *GetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetBucketAccessControlPolicy
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *GetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetBucketAccessControlPolicy
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type GetBucketAccessControlPolicyResponse struct {
+ GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketAccessControlPolicyResponse"`
+}
+
+type GetBucketLoggingStatus struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *GetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetBucketLoggingStatus
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *GetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetBucketLoggingStatus
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type GetBucketLoggingStatusResponse struct {
+ GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketLoggingStatusResponse"`
+}
+
+type GetObject struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"`
+ GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"`
+ InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *GetObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObject
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *GetObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObject
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type GetObjectAccessControlPolicy struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *GetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObjectAccessControlPolicy
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *GetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObjectAccessControlPolicy
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type GetObjectAccessControlPolicyResponse struct {
+ GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectAccessControlPolicyResponse"`
+}
+
+type GetObjectExtended struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"`
+ GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"`
+ InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"`
+ ByteRangeStart int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeStart,omitempty"`
+ ByteRangeEnd int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeEnd,omitempty"`
+ IfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
+ IfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
+ IfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfMatch,omitempty"`
+ IfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfNoneMatch,omitempty"`
+ ReturnCompleteObjectOnConditionFailure bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ReturnCompleteObjectOnConditionFailure,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *GetObjectExtended) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObjectExtended
+ var layout struct {
+ *T
+ IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.IfModifiedSince = (*xsdDateTime)(&layout.T.IfModifiedSince)
+ layout.IfUnmodifiedSince = (*xsdDateTime)(&layout.T.IfUnmodifiedSince)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *GetObjectExtended) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObjectExtended
+ var overlay struct {
+ *T
+ IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.IfModifiedSince = (*xsdDateTime)(&overlay.T.IfModifiedSince)
+ overlay.IfUnmodifiedSince = (*xsdDateTime)(&overlay.T.IfUnmodifiedSince)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type GetObjectExtendedResponse struct {
+ GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"`
+}
+
+type GetObjectResponse struct {
+ GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"`
+}
+
+type GetObjectResult struct {
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"`
+}
+
+func (t *GetObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObjectResult
+ var layout struct {
+ *T
+ Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.Data = (*xsdBase64Binary)(&layout.T.Data)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+func (t *GetObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObjectResult
+ var overlay struct {
+ *T
+ Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type Grant struct {
+ Grantee Grantee `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grantee"`
+ Permission Permission `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Permission"`
+}
+
+type Grantee struct {
+}
+
+type Group struct {
+ URI string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ URI"`
+}
+
+type ListAllMyBuckets struct {
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+}
+
+func (t *ListAllMyBuckets) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListAllMyBuckets
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListAllMyBuckets
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type ListAllMyBucketsEntry struct {
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ CreationDate time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+}
+
+func (t *ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListAllMyBucketsEntry
+ var layout struct {
+ *T
+ CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ }
+ layout.T = (*T)(t)
+ layout.CreationDate = (*xsdDateTime)(&layout.T.CreationDate)
+ return e.EncodeElement(layout, start)
+}
+func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListAllMyBucketsEntry
+ var overlay struct {
+ *T
+ CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ }
+ overlay.T = (*T)(t)
+ overlay.CreationDate = (*xsdDateTime)(&overlay.T.CreationDate)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type ListAllMyBucketsList struct {
+ Bucket []ListAllMyBucketsEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket,omitempty"`
+}
+
+type ListAllMyBucketsResponse struct {
+ ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResponse"`
+}
+
+type ListAllMyBucketsResult struct {
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner"`
+ Buckets ListAllMyBucketsList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Buckets"`
+}
+
+type ListBucket struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix,omitempty"`
+ Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker,omitempty"`
+ MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys,omitempty"`
+ Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *ListBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListBucket
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *ListBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListBucket
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type ListBucketResponse struct {
+ ListBucketResponse ListBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResponse"`
+}
+
+type ListBucketResult struct {
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+ Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker"`
+ NextMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextMarker,omitempty"`
+ MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"`
+ Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
+ IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"`
+ Contents []ListEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Contents,omitempty"`
+ CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"`
+}
+
+type ListEntry struct {
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"`
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"`
+}
+
+func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListEntry
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListEntry
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type ListVersionsResponse struct {
+ ListVersionsResponse ListVersionsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResponse"`
+}
+
+type ListVersionsResult struct {
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+ KeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ KeyMarker"`
+ VersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionIdMarker"`
+ NextKeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextKeyMarker,omitempty"`
+ NextVersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextVersionIdMarker,omitempty"`
+ MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"`
+ Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
+ IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"`
+ Version VersionEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version"`
+ DeleteMarker DeleteMarkerEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker"`
+ CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"`
+}
+
+type LoggingSettings struct {
+ TargetBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetBucket"`
+ TargetPrefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetPrefix"`
+ TargetGrants AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetGrants,omitempty"`
+}
+
+// May be one of COPY, REPLACE
+type MetadataDirective string
+
+type MetadataEntry struct {
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ Value string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Value"`
+}
+
+// May be one of Enabled, Disabled
+type MfaDeleteStatus string
+
+type NotificationConfiguration struct {
+ TopicConfiguration []TopicConfiguration `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TopicConfiguration,omitempty"`
+}
+
+// May be one of BucketOwner, Requester
+type Payer string
+
+// May be one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL
+type Permission string
+
+type PostResponse struct {
+ Location string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Location"`
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+}
+
+type PrefixEntry struct {
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+}
+
+type PutObject struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *PutObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T PutObject
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *PutObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T PutObject
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type PutObjectInline struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
+ ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *PutObjectInline) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T PutObjectInline
+ var layout struct {
+ *T
+ Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Data = (*xsdBase64Binary)(&layout.T.Data)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *PutObjectInline) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T PutObjectInline
+ var overlay struct {
+ *T
+ Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type PutObjectInlineResponse struct {
+ PutObjectInlineResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectInlineResponse"`
+}
+
+type PutObjectResponse struct {
+ PutObjectResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectResponse"`
+}
+
+type PutObjectResult struct {
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+}
+
+func (t *PutObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T PutObjectResult
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+func (t *PutObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T PutObjectResult
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type RequestPaymentConfiguration struct {
+ Payer Payer `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Payer"`
+}
+
+type Result struct {
+ Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"`
+}
+
+type SetBucketAccessControlPolicy struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *SetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T SetBucketAccessControlPolicy
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *SetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T SetBucketAccessControlPolicy
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type SetBucketAccessControlPolicyResponse struct {
+}
+
+type SetBucketLoggingStatus struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+ BucketLoggingStatus BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketLoggingStatus"`
+}
+
+func (t *SetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T SetBucketLoggingStatus
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *SetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T SetBucketLoggingStatus
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type SetBucketLoggingStatusResponse struct {
+}
+
+type SetObjectAccessControlPolicy struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+func (t *SetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T SetObjectAccessControlPolicy
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+func (t *SetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T SetObjectAccessControlPolicy
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type SetObjectAccessControlPolicyResponse struct {
+}
+
+type Status struct {
+ Code int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Code"`
+ Description string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Description"`
+}
+
+// May be one of STANDARD, REDUCED_REDUNDANCY, GLACIER, UNKNOWN
+type StorageClass string
+
+type TopicConfiguration struct {
+ Topic string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Topic"`
+ Event []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Event"`
+}
+
+type User struct {
+}
+
+type VersionEntry struct {
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"`
+ IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"`
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"`
+}
+
+func (t *VersionEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T VersionEntry
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+func (t *VersionEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T VersionEntry
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+type VersioningConfiguration struct {
+ Status VersioningStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status,omitempty"`
+ MfaDelete MfaDeleteStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MfaDelete,omitempty"`
+}
+
+// May be one of Enabled, Suspended
+type VersioningStatus string
+
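+// xsdBase64Binary renders []byte fields as standard base64 text.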
+type xsdBase64Binary []byte
+
+func (b *xsdBase64Binary) UnmarshalText(text []byte) (err error) {
+ *b, err = base64.StdEncoding.DecodeString(string(text))
+ return
+}
+func (b xsdBase64Binary) MarshalText() ([]byte, error) {
+ var buf bytes.Buffer
+ enc := base64.NewEncoder(base64.StdEncoding, &buf)
+ enc.Write([]byte(b))
+ enc.Close()
+ return buf.Bytes(), nil
+}
+
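+// xsdDateTime wraps time.Time so zero values are omitted on output and
+// timestamps parse with or without a trailing zone offset.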
+type xsdDateTime time.Time
+
+func (t *xsdDateTime) UnmarshalText(text []byte) error {
+ return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999")
+}
+func (t xsdDateTime) MarshalText() ([]byte, error) {
+ return []byte((time.Time)(t).Format("2006-01-02T15:04:05.999999999")), nil
+}
+func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if (time.Time)(t).IsZero() {
+ return nil
+ }
+ m, err := t.MarshalText()
+ if err != nil {
+ return err
+ }
+ return e.EncodeElement(m, start)
+}
+func (t xsdDateTime) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
+ if (time.Time)(t).IsZero() {
+ return xml.Attr{}, nil
+ }
+ m, err := t.MarshalText()
+ return xml.Attr{Name: name, Value: string(m)}, err
+}
+func _unmarshalTime(text []byte, t *time.Time, format string) (err error) {
+ s := string(bytes.TrimSpace(text))
+ *t, err = time.Parse(format, s)
+ if _, ok := err.(*time.ParseError); ok {
+ *t, err = time.Parse(format+"Z07:00", s)
+ }
+ return err
+}
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index d1c4c716c..b2f2d7a2d 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -11,7 +11,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"strconv"
+ "strings"
)
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
@@ -33,20 +35,44 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntriesRequest) (*filer_pb.ListEntriesResponse, error) {
- entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), "", false, 1000)
- if err != nil {
- return nil, err
+ limit := int(req.Limit)
+ if limit == 0 {
+ limit = fs.option.DirListingLimit
}
resp := &filer_pb.ListEntriesResponse{}
- for _, entry := range entries {
+ lastFileName := req.StartFromFileName
+ includeLastFile := req.InclusiveStartFrom
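+ // page through the directory, resuming from the last returned file
+ // name, until the limit is exhausted or the listing runs dry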
+ for limit > 0 {
+ entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), lastFileName, includeLastFile, limit)
+ if err != nil {
+ return nil, err
+ }
+ if len(entries) == 0 {
+ return resp, nil
+ }
+
+ includeLastFile = false
+
+ for _, entry := range entries {
+
+ lastFileName = entry.Name()
+
+ if req.Prefix != "" {
+ if !strings.HasPrefix(entry.Name(), req.Prefix) {
+ continue
+ }
+ }
+
+ resp.Entries = append(resp.Entries, &filer_pb.Entry{
+ Name: entry.Name(),
+ IsDirectory: entry.IsDirectory(),
+ Chunks: entry.Chunks,
+ Attributes: filer2.EntryAttributeToPb(entry),
+ })
+ limit--
+ }
- resp.Entries = append(resp.Entries, &filer_pb.Entry{
- Name: entry.Name(),
- IsDirectory: entry.IsDirectory(),
- Chunks: entry.Chunks,
- Attributes: filer2.EntryAttributeToPb(entry),
- })
}
return resp, nil
@@ -162,7 +188,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
}
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
- err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsDeleteData)
+ err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsRecursive, req.IsDeleteData)
return &filer_pb.DeleteEntryResponse{}, err
}
@@ -173,12 +199,30 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
ttlStr = strconv.Itoa(int(req.TtlSec))
}
- assignResult, err := operation.Assign(fs.filer.GetMaster(), &operation.VolumeAssignRequest{
+ var altRequest *operation.VolumeAssignRequest
+
+ dataCenter := req.DataCenter
+ if dataCenter == "" {
+ dataCenter = fs.option.DataCenter
+ }
+
+ assignRequest := &operation.VolumeAssignRequest{
Count: uint64(req.Count),
Replication: req.Replication,
Collection: req.Collection,
Ttl: ttlStr,
- })
+ DataCenter: dataCenter,
+ }
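+ // also prepare a fallback assignment without the data center
+ // constraint, in case the preferred data center cannot fulfill it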
+ if dataCenter != "" {
+ altRequest = &operation.VolumeAssignRequest{
+ Count: uint64(req.Count),
+ Replication: req.Replication,
+ Collection: req.Collection,
+ Ttl: ttlStr,
+ DataCenter: "",
+ }
+ }
+ assignResult, err := operation.Assign(fs.filer.GetMaster(), assignRequest, altRequest)
if err != nil {
return nil, fmt.Errorf("assign volume: %v", err)
}
@@ -193,3 +237,12 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
PublicUrl: assignResult.PublicUrl,
}, err
}
+
+func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {
+
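+ // best effort: ask every master to delete the collection; the most
+ // recent failure, if any, is reported back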
+ for _, master := range fs.option.Masters {
+ if _, getErr := util.Get(fmt.Sprintf("http://%s/col/delete?collection=%s", master, req.Collection)); getErr != nil {
+ err = getErr
+ }
+ }
+
+ return &filer_pb.DeleteCollectionResponse{}, err
+}
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 6da6b5561..61ca972cc 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -1,8 +1,6 @@
package weed_server
import (
- "net/http"
- "strconv"
"github.com/chrislusf/seaweedfs/weed/filer2"
_ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
@@ -10,42 +8,39 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
- "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "net/http"
)
+type FilerOption struct {
+ Masters []string
+ Collection string
+ DefaultReplication string
+ RedirectOnRead bool
+ DisableDirListing bool
+ MaxMB int
+ SecretKey string
+ DirListingLimit int
+ DataCenter string
+}
+
type FilerServer struct {
- port string
- masters []string
- collection string
- defaultReplication string
- redirectOnRead bool
- disableDirListing bool
- secret security.Secret
- filer *filer2.Filer
- maxMB int
+ option *FilerOption
+ secret security.Secret
+ filer *filer2.Filer
}
-func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, ip string, port int, masters []string, collection string,
- replication string, redirectOnRead bool, disableDirListing bool,
- maxMB int,
- secret string,
-) (fs *FilerServer, err error) {
+func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) {
fs = &FilerServer{
- masters: masters,
- collection: collection,
- defaultReplication: replication,
- redirectOnRead: redirectOnRead,
- disableDirListing: disableDirListing,
- maxMB: maxMB,
- port: ip + ":" + strconv.Itoa(port),
+ option: option,
}
- if len(masters) == 0 {
+ if len(option.Masters) == 0 {
glog.Fatal("master list is required!")
}
- fs.filer = filer2.NewFiler(masters)
+ fs.filer = filer2.NewFiler(option.Masters)
go fs.filer.KeepConnectedToMaster()
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index c690575b6..4e20be5da 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -10,10 +10,10 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/util"
- "strconv"
- "mime/multipart"
"mime"
+ "mime/multipart"
"path"
+ "strconv"
)
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
@@ -30,7 +30,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
if entry.IsDirectory() {
- if fs.disableDirListing {
+ if fs.option.DisableDirListing {
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
@@ -70,7 +70,7 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request,
return
}
- if fs.redirectOnRead {
+ if fs.option.RedirectOnRead {
http.Redirect(w, r, urlString, http.StatusFound)
return
}
@@ -121,8 +121,6 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque
w.Header().Set("Content-Type", mimeType)
}
- println("mime type:", mimeType)
-
totalSize := int64(filer2.TotalSize(entry.Chunks))
rangeReq := r.Header.Get("Range")
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index d46db6b73..52be6735c 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -15,6 +15,12 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "os"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
)
type FilerPostResult struct {
@@ -27,9 +33,20 @@ type FilerPostResult struct {
func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Request, path string) (fileId, urlLocation string, err error) {
var entry *filer2.Entry
- if entry, err = fs.filer.FindEntry(filer2.FullPath(path)); err != nil {
+ entry, err = fs.filer.FindEntry(filer2.FullPath(path))
+ if err == filer2.ErrNotFound {
+ return "", "", nil
+ }
+
+ if err != nil {
glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(entry.Chunks) == 0 {
+ glog.V(1).Infof("empty entry: %s", path)
+ w.WriteHeader(http.StatusNoContent)
} else {
fileId = entry.Chunks[0].FileId
urlLocation, err = operation.LookupFileId(fs.filer.GetMaster(), fileId)
@@ -41,16 +58,28 @@ func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Reques
return
}
-func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
+func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) {
ar := &operation.VolumeAssignRequest{
Count: 1,
Replication: replication,
Collection: collection,
Ttl: r.URL.Query().Get("ttl"),
+ DataCenter: dataCenter,
}
- assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar)
+ var altRequest *operation.VolumeAssignRequest
+ if dataCenter != "" {
+ altRequest = &operation.VolumeAssignRequest{
+ Count: 1,
+ Replication: replication,
+ Collection: collection,
+ Ttl: r.URL.Query().Get("ttl"),
+ DataCenter: "",
+ }
+ }
+
+ assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest)
if ae != nil {
- glog.V(0).Infoln("failing to assign a file id", ae.Error())
+ glog.Errorf("failing to assign a file id: %v", ae)
writeJsonError(w, r, http.StatusInternalServerError, ae)
err = ae
return
@@ -65,32 +94,31 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
replication := query.Get("replication")
if replication == "" {
- replication = fs.defaultReplication
+ replication = fs.option.DefaultReplication
}
collection := query.Get("collection")
if collection == "" {
- collection = fs.collection
+ collection = fs.option.Collection
+ }
+ dataCenter := query.Get("dataCenter")
+ if dataCenter == "" {
+ dataCenter = fs.option.DataCenter
}
- if autoChunked := fs.autoChunk(w, r, replication, collection); autoChunked {
+ if autoChunked := fs.autoChunk(w, r, replication, collection, dataCenter); autoChunked {
return
}
- var fileId, urlLocation string
- var err error
-
- if strings.HasPrefix(r.Header.Get("Content-Type"), "multipart/form-data; boundary=") {
- fileId, urlLocation, err = fs.multipartUploadAnalyzer(w, r, replication, collection)
- if err != nil {
- return
- }
- } else {
- fileId, urlLocation, err = fs.monolithicUploadAnalyzer(w, r, replication, collection)
- if err != nil {
- return
- }
+ fileId, urlLocation, err := fs.queryFileInfoByPath(w, r, r.URL.Path)
+ if err == nil && fileId == "" {
+ fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+ }
+ if err != nil || fileId == "" || urlLocation == "" {
+ return
}
+ glog.V(0).Infof("request header %+v, urlLocation: %v", r.Header, urlLocation)
+
u, _ := url.Parse(urlLocation)
// This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off
@@ -103,6 +131,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
glog.V(4).Infoln("post to", u)
+ // send request to volume server
request := &http.Request{
Method: r.Method,
URL: u,
@@ -116,7 +145,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
resp, do_err := util.Do(request)
if do_err != nil {
- glog.V(0).Infoln("failing to connect to volume server", r.RequestURI, do_err.Error())
+ glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, do_err, r.Method)
writeJsonError(w, r, http.StatusInternalServerError, do_err)
return
}
@@ -140,6 +169,8 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
writeJsonError(w, r, http.StatusInternalServerError, errors.New(ret.Error))
return
}
+
+ // find correct final path
path := r.URL.Path
if strings.HasSuffix(path, "/") {
if ret.Name != "" {
@@ -153,16 +184,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
}
- // also delete the old fid unless PUT operation
- if r.Method != "PUT" {
- if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
- oldFid := entry.Chunks[0].FileId
- operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid))
- } else if err != nil && err != filer2.ErrNotFound {
- glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
- }
- }
-
+ // update metadata in filer store
glog.V(4).Infoln("saving", path, "=>", fileId)
entry := &filer2.Entry{
FullPath: filer2.FullPath(path),
@@ -170,13 +192,15 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,
+ Uid: OS_UID,
+ Gid: OS_GID,
Replication: replication,
Collection: collection,
TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
},
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
- Size: uint64(r.ContentLength),
+ Size: uint64(ret.Size),
Mtime: time.Now().UnixNano(),
}},
}
@@ -187,6 +211,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
return
}
+ // send back post result
reply := FilerPostResult{
Name: ret.Name,
Size: ret.Size,
@@ -200,12 +225,12 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
// curl -X DELETE http://localhost:8888/path/to
func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
- err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), true)
+ err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), false, true)
if err != nil {
- glog.V(4).Infoln("deleting", r.URL.Path, ":", err.Error())
+ glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
- writeJsonQuiet(w, r, http.StatusAccepted, map[string]string{"error": ""})
+ w.WriteHeader(http.StatusNoContent)
}
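Three behavioral changes land in this file: a POST first consults queryFileInfoByPath and only assigns a new fid when no entry exists; the saved chunk size now comes from the volume server's reported ret.Size instead of the request Content-Length; and DELETE answers 204 No Content rather than a JSON body. Volume assignment is also data-center aware, with an unconstrained request supplied alongside as a fallback, presumably tried by operation.Assign in order. A sketch of that pattern with illustrative values:

    primary := &operation.VolumeAssignRequest{
        Count:       1,
        Replication: "000",      // illustrative
        Collection:  "pictures", // illustrative
        DataCenter:  "dc1",      // preferred placement
    }
    // Identical except for the data-center constraint; only consulted when
    // the preferred request cannot be satisfied.
    fallback := &operation.VolumeAssignRequest{
        Count:       1,
        Replication: "000",
        Collection:  "pictures",
    }
    assignResult, err := operation.Assign("localhost:9333", primary, fallback)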
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 9aac50454..4b1745aaa 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -7,6 +7,7 @@ import (
"net/http"
"path"
"strconv"
+ "strings"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
@@ -16,7 +17,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
-func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string) bool {
+func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool {
if r.Method != "POST" {
glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
return false
@@ -27,8 +28,8 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica
parsedMaxMB, _ := strconv.ParseInt(query.Get("maxMB"), 10, 32)
maxMB := int32(parsedMaxMB)
- if maxMB <= 0 && fs.maxMB > 0 {
- maxMB = int32(fs.maxMB)
+ if maxMB <= 0 && fs.option.MaxMB > 0 {
+ maxMB = int32(fs.option.MaxMB)
}
if maxMB <= 0 {
glog.V(4).Infoln("AutoChunking not enabled")
@@ -52,7 +53,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica
return false
}
- reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection)
+ reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection, dataCenter)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil {
@@ -61,7 +62,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica
return true
}
-func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string) (filerResult *FilerPostResult, replyerr error) {
+func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) {
multipartReader, multipartReaderErr := r.MultipartReader()
if multipartReaderErr != nil {
@@ -104,7 +105,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) {
writtenChunks = writtenChunks + 1
- fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection)
+ fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
if assignErr != nil {
return nil, assignErr
}
@@ -143,15 +144,9 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
}
path := r.URL.Path
- // also delete the old fid unless PUT operation
- if r.Method != "PUT" {
- if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
- for _, chunk := range entry.Chunks {
- oldFid := chunk.FileId
- operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid))
- }
- } else if err != nil {
- glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
+ if strings.HasSuffix(path, "/") {
+ if fileName != "" {
+ path += fileName
}
}
@@ -162,6 +157,8 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,
+ Uid: OS_UID,
+ Gid: OS_GID,
Replication: replication,
Collection: collection,
TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
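Auto-chunking now threads the data center through every per-chunk assignment, appends the uploaded file name when the target path ends with "/", and stamps entries with the server process's uid/gid instead of deleting old fids on overwrite. A sketch of the entry shape it persists, one FileChunk per uploaded piece (all values illustrative; the Offset field assumes the FileChunk message of this commit's filer.proto):

    entry := &filer2.Entry{
        FullPath: filer2.FullPath("/photos/big.dat"),
        Attr: filer2.Attr{
            Mtime:       time.Now(),
            Crtime:      time.Now(),
            Mode:        0660,
            Uid:         OS_UID,
            Gid:         OS_GID,
            Replication: "000",
            Collection:  "pictures",
        },
        Chunks: []*filer_pb.FileChunk{
            {FileId: "3,01637037d6", Offset: 0, Size: 4 << 20, Mtime: time.Now().UnixNano()},
            {FileId: "4,02a7303b21", Offset: 4 << 20, Size: 4 << 20, Mtime: time.Now().UnixNano()},
        },
    }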
diff --git a/weed/server/filer_server_handlers_write_monopart.go b/weed/server/filer_server_handlers_write_monopart.go
deleted file mode 100644
index db8869d67..000000000
--- a/weed/server/filer_server_handlers_write_monopart.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package weed_server
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
-)
-
-var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
-
-func escapeQuotes(s string) string {
- return quoteEscaper.Replace(s)
-}
-
-func createFormFile(writer *multipart.Writer, fieldname, filename, mime string) (io.Writer, error) {
- h := make(textproto.MIMEHeader)
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
- escapeQuotes(fieldname), escapeQuotes(filename)))
- if len(mime) == 0 {
- mime = "application/octet-stream"
- }
- h.Set("Content-Type", mime)
- return writer.CreatePart(h)
-}
-
-func makeFormData(filename, mimeType string, content io.Reader) (formData io.Reader, contentType string, err error) {
- buf := new(bytes.Buffer)
- writer := multipart.NewWriter(buf)
- defer writer.Close()
-
- part, err := createFormFile(writer, "file", filename, mimeType)
- if err != nil {
- glog.V(0).Infoln(err)
- return
- }
- _, err = io.Copy(part, content)
- if err != nil {
- glog.V(0).Infoln(err)
- return
- }
-
- formData = buf
- contentType = writer.FormDataContentType()
-
- return
-}
-
-func checkContentMD5(w http.ResponseWriter, r *http.Request) (err error) {
- if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" {
- buf, _ := ioutil.ReadAll(r.Body)
- //checkMD5
- sum := md5.Sum(buf)
- fileDataMD5 := base64.StdEncoding.EncodeToString(sum[0:len(sum)])
- if strings.ToLower(fileDataMD5) != strings.ToLower(contentMD5) {
- glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5)
- err = fmt.Errorf("MD5 check failed")
- writeJsonError(w, r, http.StatusNotAcceptable, err)
- return
- }
- //reconstruct http request body for following new request to volume server
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
- }
- return
-}
-
-func (fs *FilerServer) monolithicUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
- /*
- Amazon S3 ref link:[http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html]
- There is a long way to provide a completely compatibility against all Amazon S3 API, I just made
- a simple data stream adapter between S3 PUT API and seaweedfs's volume storage Write API
- 1. The request url format should be http://$host:$port/$bucketName/$objectName
- 2. bucketName will be mapped to seaweedfs's collection name
- 3. You could customize and make your enhancement.
- */
- lastPos := strings.LastIndex(r.URL.Path, "/")
- if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 {
- glog.V(0).Infof("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path)
- err = fmt.Errorf("URL Path is invalid")
- writeJsonError(w, r, http.StatusInternalServerError, err)
- return
- }
-
- if err = checkContentMD5(w, r); err != nil {
- return
- }
-
- fileName := r.URL.Path[lastPos+1:]
- if err = multipartHttpBodyBuilder(w, r, fileName); err != nil {
- return
- }
-
- secondPos := strings.Index(r.URL.Path[1:], "/") + 1
- collection = r.URL.Path[1:secondPos]
- path := r.URL.Path
-
- if fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path); err == nil && fileId == "" {
- fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
- }
- return
-}
-
-func multipartHttpBodyBuilder(w http.ResponseWriter, r *http.Request, fileName string) (err error) {
- body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body)
- if te != nil {
- glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error())
- writeJsonError(w, r, http.StatusInternalServerError, te)
- err = te
- return
- }
-
- if body != nil {
- switch v := body.(type) {
- case *bytes.Buffer:
- r.ContentLength = int64(v.Len())
- case *bytes.Reader:
- r.ContentLength = int64(v.Len())
- case *strings.Reader:
- r.ContentLength = int64(v.Len())
- }
- }
-
- r.Header.Set("Content-Type", contentType)
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- r.Body = rc
- return
-}
diff --git a/weed/server/filer_server_handlers_write_multipart.go b/weed/server/filer_server_handlers_write_multipart.go
deleted file mode 100644
index 91f892b52..000000000
--- a/weed/server/filer_server_handlers_write_multipart.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package weed_server
-
-import (
- "bytes"
- "io/ioutil"
- "net/http"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
-)
-
-func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string) (fileId, urlLocation string, err error) {
- //Default handle way for http multipart
- if r.Method == "PUT" {
- buf, _ := ioutil.ReadAll(r.Body)
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
- fileName, _, _, _, _, _, _, _, pe := storage.ParseUpload(r)
- if pe != nil {
- glog.V(0).Infoln("failing to parse post body", pe.Error())
- writeJsonError(w, r, http.StatusInternalServerError, pe)
- err = pe
- return
- }
- //reconstruct http request body for following new request to volume server
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
-
- path := r.URL.Path
- if strings.HasSuffix(path, "/") {
- if fileName != "" {
- path += fileName
- }
- }
- fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path)
- } else {
- fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection)
- }
- return
-}
diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go
index bfce617c5..554ed3a11 100644
--- a/weed/server/master_ui/templates.go
+++ b/weed/server/master_ui/templates.go
@@ -39,7 +39,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html>
<td><a href="http://{{ .Leader }}">{{ .Leader }}</a></td>
</tr>
<tr>
- <td class="col-sm-2 field-label"><label>Peers:</label></td>
+ <td class="col-sm-2 field-label"><label>Other Masters:</label></td>
<td class="col-sm-10"><ul class="list-unstyled">
{{ range $k, $p := .Peers }}
<li><a href="{{ $p.ConnectionString }}">{{ $p.Name }}</a></li>
diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go
index c91ab0407..627fe354e 100644
--- a/weed/server/raft_server_handlers.go
+++ b/weed/server/raft_server_handlers.go
@@ -1,8 +1,8 @@
package weed_server
import (
- "net/http"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "net/http"
)
func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
diff --git a/weed/server/volume_grpc_client.go b/weed/server/volume_grpc_client.go
index b3c755239..de6fa23c7 100644
--- a/weed/server/volume_grpc_client.go
+++ b/weed/server/volume_grpc_client.go
@@ -7,8 +7,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "golang.org/x/net/context"
"github.com/chrislusf/seaweedfs/weed/util"
+ "golang.org/x/net/context"
)
func (vs *VolumeServer) GetMaster() string {
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 9294f9bf6..037fca2c2 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -1,10 +1,10 @@
package weed_server
import (
- "net/http"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "net/http"
)
type VolumeServer struct {
diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go
index 6ef79dcdb..77b1274fd 100644
--- a/weed/server/volume_server_handlers.go
+++ b/weed/server/volume_server_handlers.go
@@ -23,19 +23,13 @@ security settings:
func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
- case "GET":
- stats.ReadRequest()
- vs.GetOrHeadHandler(w, r)
- case "HEAD":
+ case "GET", "HEAD":
stats.ReadRequest()
vs.GetOrHeadHandler(w, r)
case "DELETE":
stats.DeleteRequest()
vs.guard.WhiteList(vs.DeleteHandler)(w, r)
- case "PUT":
- stats.WriteRequest()
- vs.guard.WhiteList(vs.PostHandler)(w, r)
- case "POST":
+ case "PUT", "POST":
stats.WriteRequest()
vs.guard.WhiteList(vs.PostHandler)(w, r)
}
diff --git a/weed/server/volume_server_handlers_sync.go b/weed/server/volume_server_handlers_sync.go
index df1fde590..c6e32bb9b 100644
--- a/weed/server/volume_server_handlers_sync.go
+++ b/weed/server/volume_server_handlers_sync.go
@@ -6,6 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -50,13 +51,17 @@ func (vs *VolumeServer) getVolumeDataContentHandler(w http.ResponseWriter, r *ht
}
offset := uint32(util.ParseUint64(r.FormValue("offset"), 0))
size := uint32(util.ParseUint64(r.FormValue("size"), 0))
- content, err := storage.ReadNeedleBlob(v.DataFile(), int64(offset)*storage.NeedlePaddingSize, size)
+ content, err := storage.ReadNeedleBlob(v.DataFile(), int64(offset)*types.NeedlePaddingSize, size)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
- id := util.ParseUint64(r.FormValue("id"), 0)
+ id, err := types.ParseNeedleId(r.FormValue("id"))
+ if err != nil {
+ writeJsonError(w, r, http.StatusBadRequest, err)
+ return
+ }
n := new(storage.Needle)
n.ParseNeedleHeader(content)
if id != n.Id {
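The id form value is now parsed with types.ParseNeedleId -- hex, matching the fid key encoding -- instead of as a decimal uint64, and a malformed id is rejected with 400 Bad Request instead of silently becoming 0. Sketch (input value illustrative):

    id, err := types.ParseNeedleId("167037d6")
    if err != nil {
        // surfaced to the caller as http.StatusBadRequest
    }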
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index 3864ec903..d32958339 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -9,6 +9,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/topology"
+ "strconv"
+ "time"
)
func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
@@ -53,7 +55,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
volumeId, _ := storage.NewVolumeId(vid)
n.ParsePath(fid)
- glog.V(2).Infoln("deleting", n)
+ glog.V(2).Infof("volume %s deleting %s", vid, n)
cookie := n.Cookie
@@ -87,6 +89,14 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
count = chunkManifest.Size
}
+ n.LastModified = uint64(time.Now().Unix())
+ if len(r.FormValue("ts")) > 0 {
+ modifiedTime, err := strconv.ParseInt(r.FormValue("ts"), 10, 64)
+ if err == nil {
+ n.LastModified = uint64(modifiedTime)
+ }
+ }
+
_, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r)
if err == nil {
@@ -103,6 +113,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
var ret []operation.DeleteResult
+ now := uint64(time.Now().Unix())
for _, fid := range r.Form["fid"] {
vid, id_cookie, err := operation.ParseFileId(fid)
if err != nil {
@@ -144,6 +155,7 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
glog.V(0).Infoln("deleting", fid, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
return
}
+ n.LastModified = now
if size, err := vs.store.Delete(volumeId, n); err != nil {
ret = append(ret, operation.DeleteResult{
Fid: fid,
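Deletes now stamp the needle with a last-modified time -- the client-supplied ts when present, otherwise the current time -- so tombstones can be ordered against writes when volumes sync. A sketch of issuing a delete with an explicit timestamp (URL illustrative):

    ts := strconv.FormatInt(time.Now().Unix(), 10)
    req, err := http.NewRequest("DELETE", "http://localhost:8080/3,167037d6e9?ts="+ts, nil)
    if err != nil {
        log.Fatal(err)
    }
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()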
diff --git a/weed/storage/file_id.go b/weed/storage/file_id.go
index 4cfdb16fa..37dcb7c70 100644
--- a/weed/storage/file_id.go
+++ b/weed/storage/file_id.go
@@ -2,42 +2,33 @@ package storage
import (
"encoding/hex"
- "errors"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
)
type FileId struct {
VolumeId VolumeId
- Key uint64
- Hashcode uint32
+ Key NeedleId
+ Cookie Cookie
}
func NewFileIdFromNeedle(VolumeId VolumeId, n *Needle) *FileId {
- return &FileId{VolumeId: VolumeId, Key: n.Id, Hashcode: n.Cookie}
-}
-func NewFileId(VolumeId VolumeId, Key uint64, Hashcode uint32) *FileId {
- return &FileId{VolumeId: VolumeId, Key: Key, Hashcode: Hashcode}
+ return &FileId{VolumeId: VolumeId, Key: n.Id, Cookie: n.Cookie}
}
-func ParseFileId(fid string) (*FileId, error) {
- a := strings.Split(fid, ",")
- if len(a) != 2 {
- glog.V(1).Infoln("Invalid fid ", fid, ", split length ", len(a))
- return nil, errors.New("Invalid fid " + fid)
- }
- vid_string, key_hash_string := a[0], a[1]
- volumeId, _ := NewVolumeId(vid_string)
- key, hash, e := ParseKeyHash(key_hash_string)
- return &FileId{VolumeId: volumeId, Key: key, Hashcode: hash}, e
+
+func NewFileId(VolumeId VolumeId, key uint64, cookie uint32) *FileId {
+ return &FileId{VolumeId: VolumeId, Key: Uint64ToNeedleId(key), Cookie: Uint32ToCookie(cookie)}
}
+
func (n *FileId) String() string {
- bytes := make([]byte, 12)
- util.Uint64toBytes(bytes[0:8], n.Key)
- util.Uint32toBytes(bytes[8:12], n.Hashcode)
+ return n.VolumeId.String() + "," + formatNeedleIdCookie(n.Key, n.Cookie)
+}
+
+func formatNeedleIdCookie(key NeedleId, cookie Cookie) string {
+ bytes := make([]byte, NeedleIdSize+CookieSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ CookieToBytes(bytes[NeedleIdSize:NeedleIdSize+CookieSize], cookie)
nonzero_index := 0
for ; bytes[nonzero_index] == 0; nonzero_index++ {
}
- return n.VolumeId.String() + "," + hex.EncodeToString(bytes[nonzero_index:])
+ return hex.EncodeToString(bytes[nonzero_index:])
}
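FileId now carries a typed Cookie (the old Hashcode name is gone), and String delegates to formatNeedleIdCookie, which hex-encodes the key and cookie together and trims leading zero bytes. A worked example with illustrative values:

    // key 0x16 and cookie 0x7037d6e9 serialize to the 12 bytes
    // 00 00 00 00 00 00 00 16 70 37 d6 e9; leading zero bytes are trimmed.
    fid := NewFileId(3, 0x16, 0x7037d6e9)
    fmt.Println(fid.String()) // prints "3,167037d6e9"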
diff --git a/weed/storage/needle.go b/weed/storage/needle.go
index 2ffaff4de..31bada091 100644
--- a/weed/storage/needle.go
+++ b/weed/storage/needle.go
@@ -3,27 +3,19 @@ package storage
import (
"encoding/json"
"fmt"
- "io/ioutil"
- "math"
- "mime"
"net/http"
- "path"
"strconv"
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/images"
- "github.com/chrislusf/seaweedfs/weed/operation"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "io/ioutil"
)
const (
- NeedleHeaderSize = 16 //should never change this
- NeedlePaddingSize = 8
- NeedleChecksumSize = 4
- MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8
- TombstoneFileSize = math.MaxUint32
- PairNamePrefix = "Seaweed-"
+ NeedleChecksumSize = 4
+ PairNamePrefix = "Seaweed-"
)
/*
@@ -31,9 +23,9 @@ const (
* Needle file size is limited to 4GB for now.
*/
type Needle struct {
- Cookie uint32 `comment:"random number to mitigate brute force lookups"`
- Id uint64 `comment:"needle id"`
- Size uint32 `comment:"sum of DataSize,Data,NameSize,Name,MimeSize,Mime"`
+ Cookie Cookie `comment:"random number to mitigate brute force lookups"`
+ Id NeedleId `comment:"needle id"`
+ Size uint32 `comment:"sum of DataSize,Data,NameSize,Name,MimeSize,Mime"`
DataSize uint32 `comment:"Data size"` //version2
Data []byte `comment:"The actual file data"`
@@ -52,7 +44,7 @@ type Needle struct {
}
func (n *Needle) String() (str string) {
- str = fmt.Sprintf("Cookie:%d, Id:%d, Size:%d, DataSize:%d, Name: %s, Mime: %s", n.Cookie, n.Id, n.Size, n.DataSize, n.Name, n.Mime)
+ str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime)
return
}
@@ -66,91 +58,20 @@ func ParseUpload(r *http.Request) (
}
}
- form, fe := r.MultipartReader()
- if fe != nil {
- glog.V(0).Infoln("MultipartReader [ERROR]", fe)
- e = fe
- return
- }
-
- //first multi-part item
- part, fe := form.NextPart()
- if fe != nil {
- glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
- e = fe
- return
- }
+ isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
- fileName = part.FileName()
- if fileName != "" {
- fileName = path.Base(fileName)
+ if r.Method == "POST" {
+ fileName, data, mimeType, isGzipped, e = parseMultipart(r, isChunkedFile)
+ } else {
+ isGzipped = false
+ mimeType = r.Header.Get("Content-Type")
+ fileName = ""
+ data, e = ioutil.ReadAll(r.Body)
}
-
- data, e = ioutil.ReadAll(part)
if e != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", e)
return
}
- //if the filename is empty string, do a search on the other multi-part items
- for fileName == "" {
- part2, fe := form.NextPart()
- if fe != nil {
- break // no more or on error, just safely break
- }
-
- fName := part2.FileName()
-
- //found the first <file type> multi-part has filename
- if fName != "" {
- data2, fe2 := ioutil.ReadAll(part2)
- if fe2 != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", fe2)
- e = fe2
- return
- }
-
- //update
- data = data2
- fileName = path.Base(fName)
- break
- }
- }
-
- isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
-
- if !isChunkedFile {
-
- dotIndex := strings.LastIndex(fileName, ".")
- ext, mtype := "", ""
- if dotIndex > 0 {
- ext = strings.ToLower(fileName[dotIndex:])
- mtype = mime.TypeByExtension(ext)
- }
- contentType := part.Header.Get("Content-Type")
- if contentType != "" && mtype != contentType {
- mimeType = contentType //only return mime type if not deductable
- mtype = contentType
- }
-
- if part.Header.Get("Content-Encoding") == "gzip" {
- isGzipped = true
- } else if operation.IsGzippable(ext, mtype) {
- if data, e = operation.GzipData(data); e != nil {
- return
- }
- isGzipped = true
- }
- if ext == ".gz" {
- if strings.HasSuffix(fileName, ".css.gz") ||
- strings.HasSuffix(fileName, ".html.gz") ||
- strings.HasSuffix(fileName, ".txt.gz") ||
- strings.HasSuffix(fileName, ".js.gz") {
- fileName = fileName[:len(fileName)-3]
- isGzipped = true
- }
- }
- }
modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
ttl, _ = ReadTTL(r.FormValue("ttl"))
@@ -222,7 +143,7 @@ func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) {
}
func (n *Needle) ParsePath(fid string) (err error) {
length := len(fid)
- if length <= 8 {
+ if length <= CookieSize*2 {
return fmt.Errorf("Invalid fid: %s", fid)
}
delta := ""
@@ -230,13 +151,13 @@ func (n *Needle) ParsePath(fid string) (err error) {
if deltaIndex > 0 {
fid, delta = fid[0:deltaIndex], fid[deltaIndex+1:]
}
- n.Id, n.Cookie, err = ParseKeyHash(fid)
+ n.Id, n.Cookie, err = ParseNeedleIdCookie(fid)
if err != nil {
return err
}
if delta != "" {
if d, e := strconv.ParseUint(delta, 10, 64); e == nil {
- n.Id += d
+ n.Id += NeedleId(d)
} else {
return e
}
@@ -244,21 +165,25 @@ func (n *Needle) ParsePath(fid string) (err error) {
return err
}
-func ParseKeyHash(key_hash_string string) (uint64, uint32, error) {
- if len(key_hash_string) <= 8 {
+func ParseNeedleIdCookie(key_hash_string string) (NeedleId, Cookie, error) {
+ if len(key_hash_string) <= CookieSize*2 {
return 0, 0, fmt.Errorf("KeyHash is too short.")
}
- if len(key_hash_string) > 24 {
+ if len(key_hash_string) > (NeedleIdSize+CookieSize)*2 {
return 0, 0, fmt.Errorf("KeyHash is too long.")
}
- split := len(key_hash_string) - 8
- key, err := strconv.ParseUint(key_hash_string[:split], 16, 64)
+ split := len(key_hash_string) - CookieSize*2
+ needleId, err := ParseNeedleId(key_hash_string[:split])
if err != nil {
- return 0, 0, fmt.Errorf("Parse key error: %v", err)
+ return 0, 0, fmt.Errorf("Parse needleId error: %v", err)
}
- hash, err := strconv.ParseUint(key_hash_string[split:], 16, 32)
+ cookie, err := ParseCookie(key_hash_string[split:])
if err != nil {
- return 0, 0, fmt.Errorf("Parse hash error: %v", err)
+ return 0, 0, fmt.Errorf("Parse cookie error: %v", err)
}
- return key, uint32(hash), nil
+ return needleId, cookie, nil
+}
+
+func (n *Needle) LastModifiedString() string {
+ return time.Unix(int64(n.LastModified), 0).Format("2006-01-02T15:04:05")
}
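ParseNeedleIdCookie replaces ParseKeyHash: the last CookieSize*2 hex characters are the cookie and everything before them is the needle id, with the length checks expressed through the named sizes rather than the old magic 8 and 24. A sketch assuming the default 8-byte needle id and 4-byte cookie:

    // "167037d6e9" splits into id "16" (0x16) and cookie "7037d6e9" (0x7037d6e9).
    needleId, cookie, err := ParseNeedleIdCookie("167037d6e9")
    if err != nil {
        // strings of 8 hex chars or fewer, or more than 24, are rejected
    }
    _ = needleId // NeedleId(0x16)
    _ = cookie   // Cookie(0x7037d6e9)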
diff --git a/weed/storage/needle/btree_map.go b/weed/storage/needle/btree_map.go
index 64c0bacc1..d688b802e 100644
--- a/weed/storage/needle/btree_map.go
+++ b/weed/storage/needle/btree_map.go
@@ -1,6 +1,7 @@
package needle
import (
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/google/btree"
)
@@ -15,7 +16,7 @@ func NewBtreeMap() *BtreeMap {
}
}
-func (cm *BtreeMap) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32) {
+func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
found := cm.tree.ReplaceOrInsert(NeedleValue{key, offset, size})
if found != nil {
old := found.(NeedleValue)
@@ -24,7 +25,7 @@ func (cm *BtreeMap) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32
return
}
-func (cm *BtreeMap) Delete(key Key) (oldSize uint32) {
+func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) {
found := cm.tree.Delete(NeedleValue{key, 0, 0})
if found != nil {
old := found.(NeedleValue)
@@ -32,7 +33,7 @@ func (cm *BtreeMap) Delete(key Key) (oldSize uint32) {
}
return
}
-func (cm *BtreeMap) Get(key Key) (*NeedleValue, bool) {
+func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) {
found := cm.tree.Get(NeedleValue{key, 0, 0})
if found != nil {
old := found.(NeedleValue)
diff --git a/weed/storage/needle/compact_map.go b/weed/storage/needle/compact_map.go
index ea2360fa7..9852dca74 100644
--- a/weed/storage/needle/compact_map.go
+++ b/weed/storage/needle/compact_map.go
@@ -1,28 +1,29 @@
package needle
import (
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"sync"
)
type CompactSection struct {
sync.RWMutex
values []NeedleValue
- overflow map[Key]NeedleValue
- start Key
- end Key
+ overflow map[NeedleId]NeedleValue
+ start NeedleId
+ end NeedleId
counter int
}
-func NewCompactSection(start Key) *CompactSection {
+func NewCompactSection(start NeedleId) *CompactSection {
return &CompactSection{
values: make([]NeedleValue, batch),
- overflow: make(map[Key]NeedleValue),
+ overflow: make(map[NeedleId]NeedleValue),
start: start,
}
}
//return old entry size
-func (cs *CompactSection) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32) {
+func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
cs.Lock()
if key > cs.end {
cs.end = key
@@ -52,7 +53,7 @@ func (cs *CompactSection) Set(key Key, offset, size uint32) (oldOffset, oldSize
}
//return old entry size
-func (cs *CompactSection) Delete(key Key) uint32 {
+func (cs *CompactSection) Delete(key NeedleId) uint32 {
cs.Lock()
ret := uint32(0)
if i := cs.binarySearchValues(key); i >= 0 {
@@ -68,7 +69,7 @@ func (cs *CompactSection) Delete(key Key) uint32 {
cs.Unlock()
return ret
}
-func (cs *CompactSection) Get(key Key) (*NeedleValue, bool) {
+func (cs *CompactSection) Get(key NeedleId) (*NeedleValue, bool) {
cs.RLock()
if v, ok := cs.overflow[key]; ok {
cs.RUnlock()
@@ -81,7 +82,7 @@ func (cs *CompactSection) Get(key Key) (*NeedleValue, bool) {
cs.RUnlock()
return nil, false
}
-func (cs *CompactSection) binarySearchValues(key Key) int {
+func (cs *CompactSection) binarySearchValues(key NeedleId) int {
l, h := 0, cs.counter-1
if h >= 0 && cs.values[h].Key < key {
return -2
@@ -112,7 +113,7 @@ func NewCompactMap() *CompactMap {
return &CompactMap{}
}
-func (cm *CompactMap) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32) {
+func (cm *CompactMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) {
x := cm.binarySearchCompactSection(key)
if x < 0 {
//println(x, "creating", len(cm.list), "section, starting", key)
@@ -130,21 +131,21 @@ func (cm *CompactMap) Set(key Key, offset, size uint32) (oldOffset, oldSize uint
}
return cm.list[x].Set(key, offset, size)
}
-func (cm *CompactMap) Delete(key Key) uint32 {
+func (cm *CompactMap) Delete(key NeedleId) uint32 {
x := cm.binarySearchCompactSection(key)
if x < 0 {
return uint32(0)
}
return cm.list[x].Delete(key)
}
-func (cm *CompactMap) Get(key Key) (*NeedleValue, bool) {
+func (cm *CompactMap) Get(key NeedleId) (*NeedleValue, bool) {
x := cm.binarySearchCompactSection(key)
if x < 0 {
return nil, false
}
return cm.list[x].Get(key)
}
-func (cm *CompactMap) binarySearchCompactSection(key Key) int {
+func (cm *CompactMap) binarySearchCompactSection(key NeedleId) int {
l, h := 0, len(cm.list)-1
if h < 0 {
return -5
diff --git a/weed/storage/needle/compact_map_perf_test.go b/weed/storage/needle/compact_map_perf_test.go
index 8a26e7ed3..a66836ac8 100644
--- a/weed/storage/needle/compact_map_perf_test.go
+++ b/weed/storage/needle/compact_map_perf_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -29,11 +30,12 @@ func loadNewNeedleMap(file *os.File) {
}
for count > 0 && e == nil {
for i := 0; i < count; i += 16 {
- key := util.BytesToUint64(bytes[i : i+8])
- offset := util.BytesToUint32(bytes[i+8 : i+12])
- size := util.BytesToUint32(bytes[i+12 : i+16])
+ key := BytesToNeedleId(bytes[i : i+NeedleIdSize])
+ offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize])
+ size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize])
+
if offset > 0 {
- m.Set(Key(key), offset, size)
+ m.Set(NeedleId(key), offset, size)
} else {
//delete(m, key)
}
diff --git a/weed/storage/needle/compact_map_test.go b/weed/storage/needle/compact_map_test.go
index 4d574bafe..b4cbb446a 100644
--- a/weed/storage/needle/compact_map_test.go
+++ b/weed/storage/needle/compact_map_test.go
@@ -1,17 +1,18 @@
package needle
import (
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"testing"
)
func TestIssue52(t *testing.T) {
m := NewCompactMap()
- m.Set(Key(10002), 10002, 10002)
- if element, ok := m.Get(Key(10002)); ok {
+ m.Set(NeedleId(10002), 10002, 10002)
+ if element, ok := m.Get(NeedleId(10002)); ok {
println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size)
}
- m.Set(Key(10001), 10001, 10001)
- if element, ok := m.Get(Key(10002)); ok {
+ m.Set(NeedleId(10001), 10001, 10001)
+ if element, ok := m.Get(NeedleId(10002)); ok {
println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size)
} else {
t.Fatal("key 10002 missing after setting 10001")
@@ -21,15 +22,15 @@ func TestIssue52(t *testing.T) {
func TestXYZ(t *testing.T) {
m := NewCompactMap()
for i := uint32(0); i < 100*batch; i += 2 {
- m.Set(Key(i), i, i)
+ m.Set(NeedleId(i), Offset(i), i)
}
for i := uint32(0); i < 100*batch; i += 37 {
- m.Delete(Key(i))
+ m.Delete(NeedleId(i))
}
for i := uint32(0); i < 10*batch; i += 3 {
- m.Set(Key(i), i+11, i+5)
+ m.Set(NeedleId(i), Offset(i+11), i+5)
}
// for i := uint32(0); i < 100; i++ {
@@ -39,7 +40,7 @@ func TestXYZ(t *testing.T) {
// }
for i := uint32(0); i < 10*batch; i++ {
- v, ok := m.Get(Key(i))
+ v, ok := m.Get(NeedleId(i))
if i%3 == 0 {
if !ok {
t.Fatal("key", i, "missing!")
@@ -59,7 +60,7 @@ func TestXYZ(t *testing.T) {
}
for i := uint32(10 * batch); i < 100*batch; i++ {
- v, ok := m.Get(Key(i))
+ v, ok := m.Get(NeedleId(i))
if i%37 == 0 {
if ok && v.Size > 0 {
t.Fatal("key", i, "should have been deleted needle value", v)
diff --git a/weed/storage/needle/needle_value.go b/weed/storage/needle/needle_value.go
index 137ab0814..b15d25245 100644
--- a/weed/storage/needle/needle_value.go
+++ b/weed/storage/needle/needle_value.go
@@ -1,8 +1,7 @@
package needle
import (
- "strconv"
-
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/google/btree"
)
@@ -11,8 +10,8 @@ const (
)
type NeedleValue struct {
- Key Key
- Offset uint32 `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
+ Key NeedleId
+ Offset Offset `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
Size uint32 `comment:"Size of the data portion"`
}
@@ -20,9 +19,3 @@ func (this NeedleValue) Less(than btree.Item) bool {
that := than.(NeedleValue)
return this.Key < that.Key
}
-
-type Key uint64
-
-func (k Key) String() string {
- return strconv.FormatUint(uint64(k), 10)
-}
diff --git a/weed/storage/needle/needle_value_map.go b/weed/storage/needle/needle_value_map.go
index 81f41b235..9da257443 100644
--- a/weed/storage/needle/needle_value_map.go
+++ b/weed/storage/needle/needle_value_map.go
@@ -1,8 +1,12 @@
package needle
+import (
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
type NeedleValueMap interface {
- Set(key Key, offset, size uint32) (oldOffset, oldSize uint32)
- Delete(key Key) uint32
- Get(key Key) (*NeedleValue, bool)
+ Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32)
+ Delete(key NeedleId) uint32
+ Get(key NeedleId) (*NeedleValue, bool)
Visit(visit func(NeedleValue) error) error
}
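Both existing implementations are migrated to the NeedleId/Offset signatures above. A compile-time assertion such as the following (an editor's sketch, not in the tree) is a cheap way to keep CompactMap and BtreeMap pinned to the interface:

    var _ NeedleValueMap = NewCompactMap()
    var _ NeedleValueMap = NewBtreeMap()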
diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go
index 14e4ccf3a..6d815679b 100644
--- a/weed/storage/needle_map.go
+++ b/weed/storage/needle_map.go
@@ -7,6 +7,7 @@ import (
"sync"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -19,21 +20,17 @@ const (
NeedleMapBtree
)
-const (
- NeedleIndexSize = 16
-)
-
type NeedleMapper interface {
- Put(key uint64, offset uint32, size uint32) error
- Get(key uint64) (element *needle.NeedleValue, ok bool)
- Delete(key uint64, offset uint32) error
+ Put(key NeedleId, offset Offset, size uint32) error
+ Get(key NeedleId) (element *needle.NeedleValue, ok bool)
+ Delete(key NeedleId, offset Offset) error
Close()
Destroy() error
ContentSize() uint64
DeletedSize() uint64
FileCount() int
DeletedCount() int
- MaxFileKey() uint64
+ MaxFileKey() NeedleId
IndexFileSize() uint64
IndexFileContent() ([]byte, error)
IndexFileName() string
@@ -58,17 +55,17 @@ func (nm *baseNeedleMapper) IndexFileName() string {
return nm.indexFile.Name()
}
-func idxFileEntry(bytes []byte) (key uint64, offset uint32, size uint32) {
- key = util.BytesToUint64(bytes[:8])
- offset = util.BytesToUint32(bytes[8:12])
- size = util.BytesToUint32(bytes[12:16])
+func IdxFileEntry(bytes []byte) (key NeedleId, offset Offset, size uint32) {
+ key = BytesToNeedleId(bytes[:NeedleIdSize])
+ offset = BytesToOffset(bytes[NeedleIdSize : NeedleIdSize+OffsetSize])
+ size = util.BytesToUint32(bytes[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
return
}
-func (nm *baseNeedleMapper) appendToIndexFile(key uint64, offset uint32, size uint32) error {
- bytes := make([]byte, 16)
- util.Uint64toBytes(bytes[0:8], key)
- util.Uint32toBytes(bytes[8:12], offset)
- util.Uint32toBytes(bytes[12:16], size)
+func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size uint32) error {
+ bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
+ util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
nm.indexFileAccessLock.Lock()
defer nm.indexFileAccessLock.Unlock()
@@ -84,46 +81,3 @@ func (nm *baseNeedleMapper) IndexFileContent() ([]byte, error) {
defer nm.indexFileAccessLock.Unlock()
return ioutil.ReadFile(nm.indexFile.Name())
}
-
-type mapMetric struct {
- indexFile *os.File
-
- DeletionCounter int `json:"DeletionCounter"`
- FileCounter int `json:"FileCounter"`
- DeletionByteCounter uint64 `json:"DeletionByteCounter"`
- FileByteCounter uint64 `json:"FileByteCounter"`
- MaximumFileKey uint64 `json:"MaxFileKey"`
-}
-
-func (mm *mapMetric) logDelete(deletedByteCount uint32) {
- mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(deletedByteCount)
- mm.DeletionCounter++
-}
-
-func (mm *mapMetric) logPut(key uint64, oldSize uint32, newSize uint32) {
- if key > mm.MaximumFileKey {
- mm.MaximumFileKey = key
- }
- mm.FileCounter++
- mm.FileByteCounter = mm.FileByteCounter + uint64(newSize)
- if oldSize > 0 {
- mm.DeletionCounter++
- mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(oldSize)
- }
-}
-
-func (mm mapMetric) ContentSize() uint64 {
- return mm.FileByteCounter
-}
-func (mm mapMetric) DeletedSize() uint64 {
- return mm.DeletionByteCounter
-}
-func (mm mapMetric) FileCount() int {
- return mm.FileCounter
-}
-func (mm mapMetric) DeletedCount() int {
- return mm.DeletionCounter
-}
-func (mm mapMetric) MaxFileKey() uint64 {
- return mm.MaximumFileKey
-}
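idxFileEntry is exported as IdxFileEntry, and the 16-byte index record is now spelled with named sizes (with the default types NeedleIdSize=8, OffsetSize=4 and SizeSize=4, so NeedleEntrySize is still 16). A round-trip sketch; note that the stored offset is in NeedlePaddingSize units, as the sync handler change above shows:

    buf := make([]byte, NeedleEntrySize) // NeedleIdSize + OffsetSize + SizeSize
    NeedleIdToBytes(buf[0:NeedleIdSize], NeedleId(0x16))
    OffsetToBytes(buf[NeedleIdSize:NeedleIdSize+OffsetSize], Offset(42))
    util.Uint32toBytes(buf[NeedleIdSize+OffsetSize:], 1024)

    key, offset, size := IdxFileEntry(buf)
    // key == NeedleId(0x16), offset == Offset(42), size == uint32(1024)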
diff --git a/weed/storage/needle_map_boltdb.go b/weed/storage/needle_map_boltdb.go
index 5e64c5199..fd335fd00 100644
--- a/weed/storage/needle_map_boltdb.go
+++ b/weed/storage/needle_map_boltdb.go
@@ -6,8 +6,10 @@ import (
"github.com/boltdb/bolt"
+ "errors"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -19,6 +21,8 @@ type BoltDbNeedleMap struct {
var boltdbBucket = []byte("weed")
+var NotFound = errors.New("not found")
+
// TODO avoid using btree to count deletions.
func NewBoltDbNeedleMap(dbFileName string, indexFile *os.File) (m *BoltDbNeedleMap, err error) {
m = &BoltDbNeedleMap{dbFileName: dbFileName}
@@ -33,11 +37,11 @@ func NewBoltDbNeedleMap(dbFileName string, indexFile *os.File) (m *BoltDbNeedleM
return
}
glog.V(1).Infof("Loading %s...", indexFile.Name())
- nm, indexLoadError := LoadBtreeNeedleMap(indexFile)
+ mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
if indexLoadError != nil {
return nil, indexLoadError
}
- m.mapMetric = nm.mapMetric
+ m.mapMetric = *mm
return
}
@@ -64,7 +68,7 @@ func generateBoltDbFile(dbFileName string, indexFile *os.File) error {
return err
}
defer db.Close()
- return WalkIndexFile(indexFile, func(key uint64, offset, size uint32) error {
+ return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
if offset > 0 && size != TombstoneFileSize {
boltDbWrite(db, key, offset, size)
} else {
@@ -74,10 +78,11 @@ func generateBoltDbFile(dbFileName string, indexFile *os.File) error {
})
}
-func (m *BoltDbNeedleMap) Get(key uint64) (element *needle.NeedleValue, ok bool) {
- var offset, size uint32
- bytes := make([]byte, 8)
- util.Uint64toBytes(bytes, key)
+func (m *BoltDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
+ var offset Offset
+ var size uint32
+ bytes := make([]byte, NeedleIdSize)
+ NeedleIdToBytes(bytes, key)
err := m.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(boltdbBucket)
if bucket == nil {
@@ -86,13 +91,17 @@ func (m *BoltDbNeedleMap) Get(key uint64) (element *needle.NeedleValue, ok bool)
data := bucket.Get(bytes)
- if len(data) != 8 {
- glog.V(0).Infof("wrong data length: %d", len(data))
- return fmt.Errorf("wrong data length: %d", len(data))
+ if len(data) == 0 {
+ return NotFound
+ }
+
+ if len(data) != OffsetSize+SizeSize {
+ glog.V(0).Infof("key:%v has wrong data length: %d", key, len(data))
+ return fmt.Errorf("key:%v has wrong data length: %d", key, len(data))
}
- offset = util.BytesToUint32(data[0:4])
- size = util.BytesToUint32(data[4:8])
+ offset = BytesToOffset(data[0:OffsetSize])
+ size = util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
return nil
})
@@ -100,10 +109,10 @@ func (m *BoltDbNeedleMap) Get(key uint64) (element *needle.NeedleValue, ok bool)
if err != nil {
return nil, false
}
- return &needle.NeedleValue{Key: needle.Key(key), Offset: offset, Size: size}, true
+ return &needle.NeedleValue{Key: key, Offset: offset, Size: size}, true
}
-func (m *BoltDbNeedleMap) Put(key uint64, offset uint32, size uint32) error {
+func (m *BoltDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
var oldSize uint32
if oldNeedle, ok := m.Get(key); ok {
oldSize = oldNeedle.Size
@@ -117,27 +126,29 @@ func (m *BoltDbNeedleMap) Put(key uint64, offset uint32, size uint32) error {
}
func boltDbWrite(db *bolt.DB,
- key uint64, offset uint32, size uint32) error {
- bytes := make([]byte, 16)
- util.Uint64toBytes(bytes[0:8], key)
- util.Uint32toBytes(bytes[8:12], offset)
- util.Uint32toBytes(bytes[12:16], size)
+ key NeedleId, offset Offset, size uint32) error {
+
+ bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
+ util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
+
return db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(boltdbBucket)
if err != nil {
return err
}
- err = bucket.Put(bytes[0:8], bytes[8:16])
+ err = bucket.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize])
if err != nil {
return err
}
return nil
})
}
-func boltDbDelete(db *bolt.DB, key uint64) error {
- bytes := make([]byte, 8)
- util.Uint64toBytes(bytes, key)
+func boltDbDelete(db *bolt.DB, key NeedleId) error {
+ bytes := make([]byte, NeedleIdSize)
+ NeedleIdToBytes(bytes, key)
return db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(boltdbBucket)
if err != nil {
@@ -152,7 +163,7 @@ func boltDbDelete(db *bolt.DB, key uint64) error {
})
}
-func (m *BoltDbNeedleMap) Delete(key uint64, offset uint32) error {
+func (m *BoltDbNeedleMap) Delete(key NeedleId, offset Offset) error {
if oldNeedle, ok := m.Get(key); ok {
m.logDelete(oldNeedle.Size)
}
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index 2d4ff4d71..1580bb005 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/syndtr/goleveldb/leveldb"
)
@@ -31,11 +32,11 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File) (m *LevelDbNeedl
return
}
glog.V(1).Infof("Loading %s...", indexFile.Name())
- nm, indexLoadError := LoadBtreeNeedleMap(indexFile)
+ mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
if indexLoadError != nil {
return nil, indexLoadError
}
- m.mapMetric = nm.mapMetric
+ m.mapMetric = *mm
return
}
@@ -62,7 +63,7 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
return err
}
defer db.Close()
- return WalkIndexFile(indexFile, func(key uint64, offset, size uint32) error {
+ return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
if offset > 0 && size != TombstoneFileSize {
levelDbWrite(db, key, offset, size)
} else {
@@ -72,19 +73,19 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
})
}
-func (m *LevelDbNeedleMap) Get(key uint64) (element *needle.NeedleValue, ok bool) {
- bytes := make([]byte, 8)
- util.Uint64toBytes(bytes, key)
+func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
+ bytes := make([]byte, NeedleIdSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
data, err := m.db.Get(bytes, nil)
- if err != nil || len(data) != 8 {
+ if err != nil || len(data) != OffsetSize+SizeSize {
return nil, false
}
- offset := util.BytesToUint32(data[0:4])
- size := util.BytesToUint32(data[4:8])
- return &needle.NeedleValue{Key: needle.Key(key), Offset: offset, Size: size}, true
+ offset := BytesToOffset(data[0:OffsetSize])
+ size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
+ return &needle.NeedleValue{Key: NeedleId(key), Offset: offset, Size: size}, true
}
-func (m *LevelDbNeedleMap) Put(key uint64, offset uint32, size uint32) error {
+func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
var oldSize uint32
if oldNeedle, ok := m.Get(key); ok {
oldSize = oldNeedle.Size
@@ -98,23 +99,25 @@ func (m *LevelDbNeedleMap) Put(key uint64, offset uint32, size uint32) error {
}
func levelDbWrite(db *leveldb.DB,
- key uint64, offset uint32, size uint32) error {
- bytes := make([]byte, 16)
- util.Uint64toBytes(bytes[0:8], key)
- util.Uint32toBytes(bytes[8:12], offset)
- util.Uint32toBytes(bytes[12:16], size)
- if err := db.Put(bytes[0:8], bytes[8:16], nil); err != nil {
+ key NeedleId, offset Offset, size uint32) error {
+
+ bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset)
+ util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size)
+
+ if err := db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil {
return fmt.Errorf("failed to write leveldb: %v", err)
}
return nil
}
-func levelDbDelete(db *leveldb.DB, key uint64) error {
- bytes := make([]byte, 8)
- util.Uint64toBytes(bytes, key)
+func levelDbDelete(db *leveldb.DB, key NeedleId) error {
+ bytes := make([]byte, NeedleIdSize)
+ NeedleIdToBytes(bytes, key)
return db.Delete(bytes, nil)
}
-func (m *LevelDbNeedleMap) Delete(key uint64, offset uint32) error {
+func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error {
if oldNeedle, ok := m.Get(key); ok {
m.logDelete(oldNeedle.Size)
}
diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go
index 261486cf8..fa5576c2b 100644
--- a/weed/storage/needle_map_memory.go
+++ b/weed/storage/needle_map_memory.go
@@ -6,6 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
)
type NeedleMap struct {
@@ -45,21 +46,21 @@ func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) {
}
func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
- e := WalkIndexFile(file, func(key uint64, offset, size uint32) error {
+ e := WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
if key > nm.MaximumFileKey {
nm.MaximumFileKey = key
}
if offset > 0 && size != TombstoneFileSize {
nm.FileCounter++
nm.FileByteCounter = nm.FileByteCounter + uint64(size)
- oldOffset, oldSize := nm.m.Set(needle.Key(key), offset, size)
+ oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size)
// glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
if oldOffset > 0 && oldSize != TombstoneFileSize {
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
} else {
- oldSize := nm.m.Delete(needle.Key(key))
+ oldSize := nm.m.Delete(NeedleId(key))
// glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
@@ -72,21 +73,22 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
// walks through the index file, calls fn function with each key, offset, size
// stops with the error returned by the fn function
-func WalkIndexFile(r *os.File, fn func(key uint64, offset, size uint32) error) error {
+func WalkIndexFile(r *os.File, fn func(key NeedleId, offset Offset, size uint32) error) error {
var readerOffset int64
- bytes := make([]byte, 16*RowsToRead)
+ bytes := make([]byte, NeedleEntrySize*RowsToRead)
count, e := r.ReadAt(bytes, readerOffset)
glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
readerOffset += int64(count)
var (
- key uint64
- offset, size uint32
- i int
+ key NeedleId
+ offset Offset
+ size uint32
+ i int
)
for count > 0 && e == nil || e == io.EOF {
- for i = 0; i+16 <= count; i += 16 {
- key, offset, size = idxFileEntry(bytes[i : i+16])
+ for i = 0; i+NeedleEntrySize <= count; i += NeedleEntrySize {
+ key, offset, size = IdxFileEntry(bytes[i : i+NeedleEntrySize])
if e = fn(key, offset, size); e != nil {
return e
}
@@ -101,17 +103,17 @@ func WalkIndexFile(r *os.File, fn func(key uint64, offset, size uint32) error) e
return e
}
-func (nm *NeedleMap) Put(key uint64, offset uint32, size uint32) error {
- _, oldSize := nm.m.Set(needle.Key(key), offset, size)
+func (nm *NeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
+ _, oldSize := nm.m.Set(NeedleId(key), offset, size)
nm.logPut(key, oldSize, size)
return nm.appendToIndexFile(key, offset, size)
}
-func (nm *NeedleMap) Get(key uint64) (element *needle.NeedleValue, ok bool) {
- element, ok = nm.m.Get(needle.Key(key))
+func (nm *NeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) {
+ element, ok = nm.m.Get(NeedleId(key))
return
}
-func (nm *NeedleMap) Delete(key uint64, offset uint32) error {
- deletedBytes := nm.m.Delete(needle.Key(key))
+func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error {
+ deletedBytes := nm.m.Delete(NeedleId(key))
nm.logDelete(deletedBytes)
return nm.appendToIndexFile(key, offset, TombstoneFileSize)
}
diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go
new file mode 100644
index 000000000..3bcb140f1
--- /dev/null
+++ b/weed/storage/needle_map_metric.go
@@ -0,0 +1,106 @@
+package storage
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/willf/bloom"
+ "os"
+)
+
+type mapMetric struct {
+ DeletionCounter int `json:"DeletionCounter"`
+ FileCounter int `json:"FileCounter"`
+ DeletionByteCounter uint64 `json:"DeletionByteCounter"`
+ FileByteCounter uint64 `json:"FileByteCounter"`
+ MaximumFileKey NeedleId `json:"MaxFileKey"`
+}
+
+func (mm *mapMetric) logDelete(deletedByteCount uint32) {
+ mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(deletedByteCount)
+ mm.DeletionCounter++
+}
+
+func (mm *mapMetric) logPut(key NeedleId, oldSize uint32, newSize uint32) {
+ if key > mm.MaximumFileKey {
+ mm.MaximumFileKey = key
+ }
+ mm.FileCounter++
+ mm.FileByteCounter = mm.FileByteCounter + uint64(newSize)
+ if oldSize > 0 {
+ mm.DeletionCounter++
+ mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(oldSize)
+ }
+}
+
+func (mm mapMetric) ContentSize() uint64 {
+ return mm.FileByteCounter
+}
+func (mm mapMetric) DeletedSize() uint64 {
+ return mm.DeletionByteCounter
+}
+func (mm mapMetric) FileCount() int {
+ return mm.FileCounter
+}
+func (mm mapMetric) DeletedCount() int {
+ return mm.DeletionCounter
+}
+func (mm mapMetric) MaxFileKey() NeedleId {
+ return mm.MaximumFileKey
+}
+
+func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
+ mm = &mapMetric{}
+ var bf *bloom.BloomFilter
+ buf := make([]byte, NeedleIdSize)
+ err = reverseWalkIndexFile(r, func(entryCount int64) {
+ bf = bloom.NewWithEstimates(uint(entryCount), 0.001)
+ }, func(key NeedleId, offset Offset, size uint32) error {
+
+ if key > mm.MaximumFileKey {
+ mm.MaximumFileKey = key
+ }
+ NeedleIdToBytes(buf, key)
+ if size != TombstoneFileSize {
+ mm.FileByteCounter += uint64(size)
+ }
+
+	if !bf.Test(buf) {
+		// first (newest) entry seen for this key: its live state
+		mm.FileCounter++
+		bf.Add(buf)
+	} else {
+		// key already seen in a newer entry, so this older write was superseded
+		mm.DeletionCounter++
+		if size != TombstoneFileSize {
+			// the superseded entry was a live file; count its bytes as deleted
+			mm.DeletionByteCounter += uint64(size)
+		}
+	}
+ return nil
+ })
+ return
+}
+
+func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key NeedleId, offset Offset, size uint32) error) error {
+ fi, err := r.Stat()
+ if err != nil {
+ return fmt.Errorf("file %s stat error: %v", r.Name(), err)
+ }
+ fileSize := fi.Size()
+ if fileSize%NeedleEntrySize != 0 {
+ return fmt.Errorf("unexpected file %s size: %d", r.Name(), fileSize)
+ }
+
+ initFn(fileSize / NeedleEntrySize)
+
+ bytes := make([]byte, NeedleEntrySize)
+ for readerOffset := fileSize - NeedleEntrySize; readerOffset >= 0; readerOffset -= NeedleEntrySize {
+ count, e := r.ReadAt(bytes, readerOffset)
+ glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+ key, offset, size := IdxFileEntry(bytes)
+ if e = fn(key, offset, size); e != nil {
+ return e
+ }
+ }
+ return nil
+}
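
newNeedleMapMetricFromIndexFile leans on the fact that the newest entry for a key sits latest in the index: scanning backwards, the first sighting of a key is its live state, and any earlier sighting is a superseded write. The bloom filter stands in for the seen-set at constant memory, so with its 0.001 false-positive rate a small fraction of live files can be miscounted as deleted. For comparison, a sketch of the same counting done exactly with a map; this hypothetical helper is not part of the commit but would compile in package storage:

    // exactMetric recomputes the counters with an exact seen-set instead of
    // a bloom filter: identical semantics, O(n) memory.
    func exactMetric(r *os.File) (*mapMetric, error) {
        mm := &mapMetric{}
        seen := make(map[NeedleId]bool)
        err := reverseWalkIndexFile(r, func(int64) {}, func(key NeedleId, offset Offset, size uint32) error {
            if key > mm.MaximumFileKey {
                mm.MaximumFileKey = key
            }
            if size != TombstoneFileSize {
                mm.FileByteCounter += uint64(size)
            }
            if !seen[key] {
                // newest entry for this key
                seen[key] = true
                mm.FileCounter++
            } else {
                // an older, superseded write
                mm.DeletionCounter++
                if size != TombstoneFileSize {
                    mm.DeletionByteCounter += uint64(size)
                }
            }
            return nil
        })
        return mm, err
    }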
diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go
new file mode 100644
index 000000000..539f83a87
--- /dev/null
+++ b/weed/storage/needle_map_metric_test.go
@@ -0,0 +1,31 @@
+package storage
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "io/ioutil"
+ "math/rand"
+ "testing"
+)
+
+func TestFastLoadingNeedleMapMetrics(t *testing.T) {
+
+ idxFile, _ := ioutil.TempFile("", "tmp.idx")
+ nm := NewBtreeNeedleMap(idxFile)
+
+ for i := 0; i < 10000; i++ {
+ nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), uint32(1))
+		if i > 0 && rand.Float32() < 0.2 { // guard i > 0: rand.Int63n panics when its bound is zero
+ nm.Delete(Uint64ToNeedleId(uint64(rand.Int63n(int64(i))+1)), Uint32ToOffset(uint32(0)))
+ }
+ }
+
+ mm, _ := newNeedleMapMetricFromIndexFile(idxFile)
+
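+	// the bloom filter makes the reconstructed counters approximate, hence log rather than assert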
+ glog.V(0).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount())
+ glog.V(0).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize())
+ glog.V(0).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize())
+ glog.V(0).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount())
+ glog.V(0).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey())
+}
diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go
new file mode 100644
index 000000000..af12b994d
--- /dev/null
+++ b/weed/storage/needle_parse_multipart.go
@@ -0,0 +1,100 @@
+package storage
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "path"
+ "strings"
+)
+
+func parseMultipart(r *http.Request, isChunkedFile bool) (
+ fileName string, data []byte, mimeType string, isGzipped bool, e error) {
+ form, fe := r.MultipartReader()
+ if fe != nil {
+ glog.V(0).Infoln("MultipartReader [ERROR]", fe)
+ e = fe
+ return
+ }
+
+ //first multi-part item
+ part, fe := form.NextPart()
+ if fe != nil {
+ glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
+ e = fe
+ return
+ }
+
+ fileName = part.FileName()
+ if fileName != "" {
+ fileName = path.Base(fileName)
+ }
+
+ data, e = ioutil.ReadAll(part)
+ if e != nil {
+ glog.V(0).Infoln("Reading Content [ERROR]", e)
+ return
+ }
+
+	//if the filename is an empty string, search the remaining multi-part items
+ for fileName == "" {
+ part2, fe := form.NextPart()
+ if fe != nil {
+			break // no more parts, or a read error; stop searching
+ }
+
+ fName := part2.FileName()
+
+		//use the first multi-part item that carries a filename
+ if fName != "" {
+ data2, fe2 := ioutil.ReadAll(part2)
+ if fe2 != nil {
+ glog.V(0).Infoln("Reading Content [ERROR]", fe2)
+ e = fe2
+ return
+ }
+
+			//adopt this part's content and filename
+ data = data2
+ fileName = path.Base(fName)
+ break
+ }
+ }
+
+ if !isChunkedFile {
+
+ dotIndex := strings.LastIndex(fileName, ".")
+ ext, mtype := "", ""
+ if dotIndex > 0 {
+ ext = strings.ToLower(fileName[dotIndex:])
+ mtype = mime.TypeByExtension(ext)
+ }
+ contentType := part.Header.Get("Content-Type")
+ if contentType != "" && mtype != contentType {
+			mimeType = contentType //only return the mime type when it cannot be deduced from the file extension
+ mtype = contentType
+ }
+
+ if part.Header.Get("Content-Encoding") == "gzip" {
+ isGzipped = true
+ } else if operation.IsGzippable(ext, mtype) {
+ if data, e = operation.GzipData(data); e != nil {
+ return
+ }
+ isGzipped = true
+ }
+ if ext == ".gz" {
+ if strings.HasSuffix(fileName, ".css.gz") ||
+ strings.HasSuffix(fileName, ".html.gz") ||
+ strings.HasSuffix(fileName, ".txt.gz") ||
+ strings.HasSuffix(fileName, ".js.gz") {
+ fileName = fileName[:len(fileName)-3]
+ isGzipped = true
+ }
+ }
+ }
+
+ return
+}
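
parseMultipart picks the first multi-part item that carries a filename and applies gzip handling based on the part headers and the file extension. A sketch of a request it would accept, built with the standard library (the URL and field name are placeholders):

    package main

    import (
        "bytes"
        "log"
        "mime/multipart"
        "net/http"
    )

    func main() {
        body := &bytes.Buffer{}
        w := multipart.NewWriter(body)
        part, err := w.CreateFormFile("file", "hello.txt")
        if err != nil {
            log.Fatal(err)
        }
        part.Write([]byte("hello world")) // .txt is gzippable, so the server may compress it
        w.Close()

        req, err := http.NewRequest("POST", "http://localhost:8080/3,01637037d6", body)
        if err != nil {
            log.Fatal(err)
        }
        // the boundary-bearing content type is what r.MultipartReader() needs
        req.Header.Set("Content-Type", w.FormDataContentType())
        _ = req // hand off to an http.Client as usual
    }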
diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go
index 4241f0758..bfd325475 100644
--- a/weed/storage/needle_read_write.go
+++ b/weed/storage/needle_read_write.go
@@ -7,6 +7,7 @@ import (
"os"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -43,27 +44,27 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
}
switch version {
case Version1:
- header := make([]byte, NeedleHeaderSize)
- util.Uint32toBytes(header[0:4], n.Cookie)
- util.Uint64toBytes(header[4:12], n.Id)
+ header := make([]byte, NeedleEntrySize)
+ CookieToBytes(header[0:CookieSize], n.Cookie)
+ NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
n.Size = uint32(len(n.Data))
size = n.Size
- util.Uint32toBytes(header[12:16], n.Size)
+ util.Uint32toBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)
if _, err = w.Write(header); err != nil {
return
}
if _, err = w.Write(n.Data); err != nil {
return
}
- actualSize = NeedleHeaderSize + int64(n.Size)
- padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
+ actualSize = NeedleEntrySize + int64(n.Size)
+ padding := NeedlePaddingSize - ((NeedleEntrySize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
_, err = w.Write(header[0 : NeedleChecksumSize+padding])
return
case Version2:
- header := make([]byte, NeedleHeaderSize)
- util.Uint32toBytes(header[0:4], n.Cookie)
- util.Uint64toBytes(header[4:12], n.Id)
+ header := make([]byte, NeedleEntrySize)
+ CookieToBytes(header[0:CookieSize], n.Cookie)
+ NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
n.DataSize, n.NameSize, n.MimeSize = uint32(len(n.Data)), uint8(len(n.Name)), uint8(len(n.Mime))
if n.DataSize > 0 {
n.Size = 4 + n.DataSize + 1
@@ -86,7 +87,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
n.Size = 0
}
size = n.DataSize
- util.Uint32toBytes(header[12:16], n.Size)
+ util.Uint32toBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)
if _, err = w.Write(header); err != nil {
return
}
@@ -142,7 +143,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
}
}
}
- padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
+ padding := NeedlePaddingSize - ((NeedleEntrySize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
_, err = w.Write(header[0 : NeedleChecksumSize+padding])
@@ -152,7 +153,9 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
}
func ReadNeedleBlob(r *os.File, offset int64, size uint32) (dataSlice []byte, err error) {
- return getBytesForFileBlock(r, offset, int(getActualSize(size)))
+ dataSlice = make([]byte, int(getActualSize(size)))
+ _, err = r.ReadAt(dataSlice, offset)
+ return dataSlice, err
}
func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version) (err error) {
@@ -166,14 +169,14 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
}
switch version {
case Version1:
- n.Data = bytes[NeedleHeaderSize : NeedleHeaderSize+size]
+ n.Data = bytes[NeedleEntrySize : NeedleEntrySize+size]
case Version2:
- n.readNeedleDataVersion2(bytes[NeedleHeaderSize : NeedleHeaderSize+int(n.Size)])
+ n.readNeedleDataVersion2(bytes[NeedleEntrySize : NeedleEntrySize+int(n.Size)])
}
if size == 0 {
return nil
}
- checksum := util.BytesToUint32(bytes[NeedleHeaderSize+size : NeedleHeaderSize+size+NeedleChecksumSize])
+ checksum := util.BytesToUint32(bytes[NeedleEntrySize+size : NeedleEntrySize+size+NeedleChecksumSize])
newChecksum := NewCRC(n.Data)
if checksum != newChecksum.Value() {
return errors.New("CRC error! Data On Disk Corrupted")
@@ -181,11 +184,13 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
n.Checksum = newChecksum
return nil
}
+
func (n *Needle) ParseNeedleHeader(bytes []byte) {
- n.Cookie = util.BytesToUint32(bytes[0:4])
- n.Id = util.BytesToUint64(bytes[4:12])
- n.Size = util.BytesToUint32(bytes[12:NeedleHeaderSize])
+ n.Cookie = BytesToCookie(bytes[0:CookieSize])
+ n.Id = BytesToNeedleId(bytes[CookieSize : CookieSize+NeedleIdSize])
+ n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleEntrySize])
}
+
func (n *Needle) readNeedleDataVersion2(bytes []byte) {
index, lenBytes := 0, len(bytes)
if index < lenBytes {
@@ -230,25 +235,25 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) {
}
}
-func ReadNeedleHeader(r *os.File, version Version, offset int64) (n *Needle, bodyLength uint32, err error) {
+func ReadNeedleHeader(r *os.File, version Version, offset int64) (n *Needle, bodyLength int64, err error) {
n = new(Needle)
if version == Version1 || version == Version2 {
- bytes := make([]byte, NeedleHeaderSize)
+ bytes := make([]byte, NeedleEntrySize)
var count int
count, err = r.ReadAt(bytes, offset)
if count <= 0 || err != nil {
return nil, 0, err
}
n.ParseNeedleHeader(bytes)
- padding := NeedlePaddingSize - ((n.Size + NeedleHeaderSize + NeedleChecksumSize) % NeedlePaddingSize)
- bodyLength = n.Size + NeedleChecksumSize + padding
+ padding := NeedlePaddingSize - ((n.Size + NeedleEntrySize + NeedleChecksumSize) % NeedlePaddingSize)
+ bodyLength = int64(n.Size) + NeedleChecksumSize + int64(padding)
}
return
}
//n should be a needle already read the header
//the input stream will read until next file entry
-func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyLength uint32) (err error) {
+func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyLength int64) (err error) {
if bodyLength <= 0 {
return nil
}
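
The rewritten header code addresses the needle header through the size constants: cookie in [0, CookieSize), needle id in [CookieSize, CookieSize+NeedleIdSize), size in the final SizeSize bytes, 16 bytes in all (NeedleEntrySize). A small sketch of the layout and the padding rule; the checksum width of 4 comes from NeedleChecksumSize in weed/storage:

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/storage/types"
    )

    func main() {
        // header layout: [0,4) cookie | [4,12) needle id | [12,16) size
        fmt.Println(types.CookieSize, types.NeedleIdSize, types.SizeSize) // 4 8 4
        fmt.Println(types.NeedleEntrySize)                                // 16

        // on disk a needle is header + data + checksum, padded to 8 bytes
        size := uint32(11) // e.g. len("hello world")
        padding := types.NeedlePaddingSize - ((types.NeedleEntrySize + size + 4) % types.NeedlePaddingSize)
        fmt.Println(types.NeedleEntrySize + size + 4 + padding) // 32, a multiple of 8
    }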
diff --git a/weed/storage/needle_test.go b/weed/storage/needle_test.go
index c05afda2f..65036409c 100644
--- a/weed/storage/needle_test.go
+++ b/weed/storage/needle_test.go
@@ -1,12 +1,15 @@
package storage
-import "testing"
+import (
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "testing"
+)
func TestParseKeyHash(t *testing.T) {
testcases := []struct {
KeyHash string
- ID uint64
- Cookie uint32
+ ID types.NeedleId
+ Cookie types.Cookie
Err bool
}{
// normal
@@ -26,7 +29,7 @@ func TestParseKeyHash(t *testing.T) {
}
for _, tc := range testcases {
- if id, cookie, err := ParseKeyHash(tc.KeyHash); err != nil && !tc.Err {
+ if id, cookie, err := ParseNeedleIdCookie(tc.KeyHash); err != nil && !tc.Err {
t.Fatalf("Parse %s error: %v", tc.KeyHash, err)
} else if err == nil && tc.Err {
t.Fatalf("Parse %s expected error got nil", tc.KeyHash)
@@ -40,6 +43,6 @@ func BenchmarkParseKeyHash(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- ParseKeyHash("4ed44ed44ed44ed4c8116e41")
+ ParseNeedleIdCookie("4ed44ed44ed44ed4c8116e41")
}
}
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 84ed1951d..ac125ef4b 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
)
const (
@@ -160,7 +161,7 @@ func (s *Store) SetRack(rack string) {
func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
var volumeMessages []*master_pb.VolumeInformationMessage
maxVolumeCount := 0
- var maxFileKey uint64
+ var maxFileKey NeedleId
for _, location := range s.Locations {
maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
location.Lock()
@@ -199,7 +200,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
Port: uint32(s.Port),
PublicUrl: s.PublicUrl,
MaxVolumeCount: uint32(maxVolumeCount),
- MaxFileKey: maxFileKey,
+ MaxFileKey: NeedleIdToUint64(maxFileKey),
DataCenter: s.dataCenter,
Rack: s.rack,
Volumes: volumeMessages,
diff --git a/weed/storage/types/needle_id_type.go b/weed/storage/types/needle_id_type.go
new file mode 100644
index 000000000..cb19a6f45
--- /dev/null
+++ b/weed/storage/types/needle_id_type.go
@@ -0,0 +1,42 @@
+package types
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "strconv"
+)
+
+type NeedleId uint64
+
+const (
+ NeedleIdSize = 8
+)
+
+func NeedleIdToBytes(bytes []byte, needleId NeedleId) {
+ util.Uint64toBytes(bytes, uint64(needleId))
+}
+
+// NeedleIdToUint64 used to send max needle id to master
+func NeedleIdToUint64(needleId NeedleId) uint64 {
+ return uint64(needleId)
+}
+
+func Uint64ToNeedleId(needleId uint64) NeedleId {
+ return NeedleId(needleId)
+}
+
+func BytesToNeedleId(bytes []byte) NeedleId {
+ return NeedleId(util.BytesToUint64(bytes))
+}
+
+func (k NeedleId) String() string {
+ return strconv.FormatUint(uint64(k), 10)
+}
+
+func ParseNeedleId(idString string) (NeedleId, error) {
+ key, err := strconv.ParseUint(idString, 16, 64)
+ if err != nil {
+ return 0, fmt.Errorf("needle id %s format error: %v", idString, err)
+ }
+ return NeedleId(key), nil
+}
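
NeedleId keeps the old key conventions: it parses from hex (as file ids appear on the wire) but prints in decimal. A quick round-trip sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/chrislusf/seaweedfs/weed/storage/types"
    )

    func main() {
        id, err := types.ParseNeedleId("4ed4") // parsed as hex
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(id) // String() formats in decimal: 20180

        buf := make([]byte, types.NeedleIdSize)
        types.NeedleIdToBytes(buf, id) // fixed 8-byte encoding, as stored in .idx entries
        fmt.Println(types.BytesToNeedleId(buf) == id) // true
    }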
diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go
new file mode 100644
index 000000000..8a2054fc5
--- /dev/null
+++ b/weed/storage/types/needle_types.go
@@ -0,0 +1,52 @@
+package types
+
+import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "math"
+ "strconv"
+)
+
+type Offset uint32
+type Cookie uint32
+
+const (
+ OffsetSize = 4
+ SizeSize = 4 // uint32 size
+ NeedleEntrySize = NeedleIdSize + OffsetSize + SizeSize
+ NeedlePaddingSize = 8
+ MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8
+ TombstoneFileSize = math.MaxUint32
+ CookieSize = 4
+)
+
+func CookieToBytes(bytes []byte, cookie Cookie) {
+ util.Uint32toBytes(bytes, uint32(cookie))
+}
+func Uint32ToCookie(cookie uint32) Cookie {
+ return Cookie(cookie)
+}
+
+func BytesToCookie(bytes []byte) Cookie {
+ return Cookie(util.BytesToUint32(bytes[0:4]))
+}
+
+func ParseCookie(cookieString string) (Cookie, error) {
+ cookie, err := strconv.ParseUint(cookieString, 16, 32)
+ if err != nil {
+ return 0, fmt.Errorf("needle cookie %s format error: %v", cookieString, err)
+ }
+ return Cookie(cookie), nil
+}
+
+func OffsetToBytes(bytes []byte, offset Offset) {
+ util.Uint32toBytes(bytes, uint32(offset))
+}
+
+func Uint32ToOffset(offset uint32) Offset {
+ return Offset(offset)
+}
+
+func BytesToOffset(bytes []byte) Offset {
+ return Offset(util.BytesToUint32(bytes[0:4]))
+}
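
Offset is a uint32 counted in NeedlePaddingSize (8 byte) blocks rather than bytes, which is exactly where MaxPossibleVolumeSize comes from: 2^32 blocks of 8 bytes is 32GiB. A sketch of the unit conversion, assuming these types:

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/storage/types"
    )

    func main() {
        // 2^32 addressable blocks of 8 bytes each
        fmt.Println((uint64(1)<<32)*types.NeedlePaddingSize == types.MaxPossibleVolumeSize) // true

        byteOffset := int64(4096)
        stored := types.Uint32ToOffset(uint32(byteOffset / types.NeedlePaddingSize))
        fmt.Println(stored)                                  // 512
        fmt.Println(int64(stored) * types.NeedlePaddingSize) // back to 4096
    }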
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index 5603a878b..c928ae9a2 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -4,12 +4,13 @@ import (
"fmt"
"os"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
func getActualSize(size uint32) int64 {
- padding := NeedlePaddingSize - ((NeedleHeaderSize + size + NeedleChecksumSize) % NeedlePaddingSize)
- return NeedleHeaderSize + int64(size) + NeedleChecksumSize + int64(padding)
+ padding := NeedlePaddingSize - ((NeedleEntrySize + size + NeedleChecksumSize) % NeedlePaddingSize)
+ return NeedleEntrySize + int64(size) + NeedleChecksumSize + int64(padding)
}
func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error {
@@ -22,10 +23,10 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error {
return nil
}
var lastIdxEntry []byte
- if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleIndexSize); e != nil {
+ if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleEntrySize); e != nil {
return fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e)
}
- key, offset, size := idxFileEntry(lastIdxEntry)
+ key, offset, size := IdxFileEntry(lastIdxEntry)
if offset == 0 || size == TombstoneFileSize {
return nil
}
@@ -38,7 +39,7 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error {
func verifyIndexFileIntegrity(indexFile *os.File) (indexSize int64, err error) {
if indexSize, err = util.GetFileSize(indexFile); err == nil {
- if indexSize%NeedleIndexSize != 0 {
+ if indexSize%NeedleEntrySize != 0 {
err = fmt.Errorf("index file's size is %d bytes, maybe corrupted", indexSize)
}
}
@@ -50,12 +51,12 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err
err = fmt.Errorf("offset %d for index file is invalid", offset)
return
}
- bytes = make([]byte, NeedleIndexSize)
+ bytes = make([]byte, NeedleEntrySize)
_, err = indexFile.ReadAt(bytes, offset)
return
}
-func verifyNeedleIntegrity(datFile *os.File, v Version, offset int64, key uint64, size uint32) error {
+func verifyNeedleIntegrity(datFile *os.File, v Version, offset int64, key NeedleId, size uint32) error {
n := new(Needle)
err := n.ReadData(datFile, offset, size, v)
if err != nil {
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 457d50410..572220650 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -43,6 +43,8 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
if e != nil {
if !os.IsPermission(e) {
return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, e)
+ } else {
+ return fmt.Errorf("load data file %s.dat: %v", fileName, e)
}
}
diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go
index 6572ea6c7..3e36bfb2e 100644
--- a/weed/storage/volume_read_write.go
+++ b/weed/storage/volume_read_write.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
)
// isFileUnchanged checks whether this needle to write is same as last one.
@@ -109,7 +110,7 @@ func (v *Volume) writeNeedle(n *Needle) (size uint32, err error) {
nv, ok := v.nm.Get(n.Id)
if !ok || int64(nv.Offset)*NeedlePaddingSize < offset {
- if err = v.nm.Put(n.Id, uint32(offset/NeedlePaddingSize), n.Size); err != nil {
+ if err = v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size); err != nil {
glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
}
}
@@ -134,7 +135,7 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) {
if err != nil {
return size, err
}
- if err := v.nm.Delete(n.Id, uint32(offset/NeedlePaddingSize)); err != nil {
+ if err := v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize)); err != nil {
return size, err
}
n.Data = nil
@@ -197,7 +198,7 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId,
}
for n != nil {
if readNeedleBody {
- if err = n.ReadNeedleBody(v.dataFile, version, offset+int64(NeedleHeaderSize), rest); err != nil {
+ if err = n.ReadNeedleBody(v.dataFile, version, offset+NeedleEntrySize, rest); err != nil {
glog.V(0).Infof("cannot read needle body: %v", err)
//err = fmt.Errorf("cannot read needle body: %v", err)
//return
@@ -207,19 +208,23 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId,
// fixed in v0.69
// remove this whole "if" clause later, long after 0.69
oldRest, oldSize := rest, n.Size
- padding := NeedlePaddingSize - ((n.Size + NeedleHeaderSize + NeedleChecksumSize) % NeedlePaddingSize)
+ padding := NeedlePaddingSize - ((n.Size + NeedleEntrySize + NeedleChecksumSize) % NeedlePaddingSize)
n.Size = 0
- rest = n.Size + NeedleChecksumSize + padding
+ rest = int64(n.Size + NeedleChecksumSize + padding)
if rest%NeedlePaddingSize != 0 {
rest += (NeedlePaddingSize - rest%NeedlePaddingSize)
}
glog.V(4).Infof("Adjusting n.Size %d=>0 rest:%d=>%d %+v", oldSize, oldRest, rest, n)
}
}
- if err = visitNeedle(n, offset); err != nil {
+ err = visitNeedle(n, offset)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
glog.V(0).Infof("visit needle error: %v", err)
}
- offset += int64(NeedleHeaderSize) + int64(rest)
+ offset += NeedleEntrySize + rest
glog.V(4).Infof("==> new entry offset %d", offset)
if n, rest, err = ReadNeedleHeader(v.dataFile, version, offset); err != nil {
if err == io.EOF {
diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go
index b934fc59d..e808f888f 100644
--- a/weed/storage/volume_sync.go
+++ b/weed/storage/volume_sync.go
@@ -12,6 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -93,7 +94,7 @@ func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.Compact
if needleValue.Key == 0 {
return nil
}
- if _, ok := slaveMap.Get(uint64(needleValue.Key)); ok {
+ if _, ok := slaveMap.Get(needleValue.Key); ok {
return nil // skip intersection
}
delta = append(delta, needleValue)
@@ -147,12 +148,12 @@ func fetchVolumeFileEntries(volumeServer string, vid VolumeId) (m *needle.Compac
}
total := 0
- err = operation.GetVolumeIdxEntries(volumeServer, vid.String(), func(key uint64, offset, size uint32) {
+ err = operation.GetVolumeIdxEntries(volumeServer, vid.String(), func(key NeedleId, offset Offset, size uint32) {
// println("remote key", key, "offset", offset*NeedlePaddingSize, "size", size)
if offset > 0 && size != TombstoneFileSize {
- m.Set(needle.Key(key), offset, size)
+			m.Set(key, offset, size)
} else {
- m.Delete(needle.Key(key))
+			m.Delete(key)
}
total++
})
@@ -179,9 +180,9 @@ func (v *Volume) IndexFileContent() ([]byte, error) {
}
// removeNeedle removes one needle by needle key
-func (v *Volume) removeNeedle(key needle.Key) {
+func (v *Volume) removeNeedle(key NeedleId) {
n := new(Needle)
- n.Id = uint64(key)
+ n.Id = key
v.deleteNeedle(n)
}
@@ -208,7 +209,7 @@ func (v *Volume) fetchNeedle(volumeDataContentHandlerUrl string,
return fmt.Errorf("Appending volume %d error: %v", v.Id, err)
}
// println("add key", needleValue.Key, "offset", offset, "size", needleValue.Size)
- v.nm.Put(uint64(needleValue.Key), uint32(offset/NeedlePaddingSize), needleValue.Size)
+ v.nm.Put(needleValue.Key, Offset(offset/NeedlePaddingSize), needleValue.Size)
return nil
})
}
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index c0bdcdf96..58ecc73cb 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -122,18 +123,18 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
}
type keyField struct {
- offset uint32
+ offset Offset
size uint32
}
- incrementedHasUpdatedIndexEntry := make(map[uint64]keyField)
+ incrementedHasUpdatedIndexEntry := make(map[NeedleId]keyField)
- for idx_offset := indexSize - NeedleIndexSize; uint64(idx_offset) >= v.lastCompactIndexOffset; idx_offset -= NeedleIndexSize {
+ for idx_offset := indexSize - NeedleEntrySize; uint64(idx_offset) >= v.lastCompactIndexOffset; idx_offset -= NeedleEntrySize {
var IdxEntry []byte
if IdxEntry, err = readIndexEntryAtOffset(oldIdxFile, idx_offset); err != nil {
return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idx_offset, err)
}
- key, offset, size := idxFileEntry(IdxEntry)
- glog.V(0).Infof("key %d offset %d size %d", key, offset, size)
+ key, offset, size := IdxFileEntry(IdxEntry)
+ glog.V(4).Infof("key %d offset %d size %d", key, offset, size)
if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
incrementedHasUpdatedIndexEntry[key] = keyField{
offset: offset,
@@ -170,11 +171,11 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
return fmt.Errorf("oldDatFile %s 's compact revision is %d while newDatFile %s 's compact revision is %d", oldDatFileName, oldDatCompactRevision, newDatFileName, newDatCompactRevision)
}
- idx_entry_bytes := make([]byte, 16)
+	idx_entry_bytes := make([]byte, NeedleEntrySize)
for key, incre_idx_entry := range incrementedHasUpdatedIndexEntry {
- util.Uint64toBytes(idx_entry_bytes[0:8], key)
- util.Uint32toBytes(idx_entry_bytes[8:12], incre_idx_entry.offset)
- util.Uint32toBytes(idx_entry_bytes[12:16], incre_idx_entry.size)
+ NeedleIdToBytes(idx_entry_bytes[0:NeedleIdSize], key)
+ OffsetToBytes(idx_entry_bytes[NeedleIdSize:NeedleIdSize+OffsetSize], incre_idx_entry.offset)
+ util.Uint32toBytes(idx_entry_bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], incre_idx_entry.size)
var offset int64
if offset, err = dst.Seek(0, 2); err != nil {
@@ -193,7 +194,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
//updated needle
if incre_idx_entry.offset != 0 && incre_idx_entry.size != 0 && incre_idx_entry.size != TombstoneFileSize {
//even the needle cache in memory is hit, the need_bytes is correct
- glog.V(0).Infof("file %d offset %d size %d", key, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size)
+ glog.V(4).Infof("file %d offset %d size %d", key, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size)
var needle_bytes []byte
needle_bytes, err = ReadNeedleBlob(oldDatFile, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size)
if err != nil {
@@ -255,7 +256,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
nv, ok := v.nm.Get(n.Id)
glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 {
- if err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil {
+ if err = nm.Put(n.Id, Offset(new_offset/NeedlePaddingSize), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
}
if _, _, err := n.Append(dst, v.Version()); err != nil {
@@ -296,7 +297,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
dst.Write(v.SuperBlock.Bytes())
new_offset := int64(v.SuperBlock.BlockSize())
- WalkIndexFile(oldIndexFile, func(key uint64, offset, size uint32) error {
+ WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error {
if offset == 0 || size == TombstoneFileSize {
return nil
}
@@ -315,7 +316,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
if nv.Offset == offset && nv.Size > 0 {
- if err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil {
+ if err = nm.Put(n.Id, Offset(new_offset/NeedlePaddingSize), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
}
if _, _, err = n.Append(dst, v.Version()); err != nil {
diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 53c50b743..464d52618 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -1,6 +1,7 @@
package storage
import (
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"io/ioutil"
"math/rand"
"os"
@@ -129,12 +130,12 @@ func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
size: size,
crc: n.Checksum,
}
- println("written file", i, "checksum", n.Checksum.Value(), "size", size)
+ // println("written file", i, "checksum", n.Checksum.Value(), "size", size)
if rand.Float64() < 0.5 {
toBeDeleted := rand.Intn(i) + 1
oldNeedle := newEmptyNeedle(uint64(toBeDeleted))
v.deleteNeedle(oldNeedle)
- println("deleted file", toBeDeleted)
+ // println("deleted file", toBeDeleted)
infos[toBeDeleted-1] = &needleInfo{
size: 0,
crc: n.Checksum,
@@ -153,12 +154,12 @@ func newRandomNeedle(id uint64) *Needle {
rand.Read(n.Data)
n.Checksum = NewCRC(n.Data)
- n.Id = id
+ n.Id = types.Uint64ToNeedleId(id)
return n
}
func newEmptyNeedle(id uint64) *Needle {
n := new(Needle)
- n.Id = id
+ n.Id = types.Uint64ToNeedleId(id)
return n
}
diff --git a/weed/tools/read_index.go b/weed/tools/read_index.go
index 642ff786b..d53f489ea 100644
--- a/weed/tools/read_index.go
+++ b/weed/tools/read_index.go
@@ -7,6 +7,7 @@ import (
"os"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
var (
@@ -21,8 +22,8 @@ func main() {
}
defer indexFile.Close()
- storage.WalkIndexFile(indexFile, func(key uint64, offset, size uint32) error {
- fmt.Printf("key %d, offset %d, size %d, nextOffset %d\n", key, offset*8, size, offset*8+size)
+ storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
+ fmt.Printf("key %d, offset %d, size %d, nextOffset %d\n", key, offset*8, size, int64(offset)*types.NeedlePaddingSize+int64(size))
return nil
})
}
diff --git a/weed/topology/configuration.go b/weed/topology/configuration.go
index 42c7c140e..cb635658f 100644
--- a/weed/topology/configuration.go
+++ b/weed/topology/configuration.go
@@ -20,8 +20,8 @@ type topology struct {
DataCenters []dataCenter `xml:"DataCenter"`
}
type Configuration struct {
- XMLName xml.Name `xml:"Configuration"`
- Topo topology `xml:"Topology"`
+ XMLName xml.Name `xml:"Configuration"`
+ Topo topology `xml:"Topology"`
ip2location map[string]loc // this is not used any more. leave it here for later.
}
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index cee156dc1..177c2a181 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -6,10 +6,10 @@ import (
"github.com/chrislusf/raft"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/sequence"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)
type Topology struct {
@@ -132,7 +132,11 @@ func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
}
func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
glog.Infof("removing volume info:%+v", v)
- t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl).UnRegisterVolume(&v, dn)
+ volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+ volumeLayout.UnRegisterVolume(&v, dn)
+ if volumeLayout.isEmpty() {
+ t.DeleteCollection(v.Collection)
+ }
}
func (t *Topology) GetOrCreateDataCenter(dcName string) *DataCenter {
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index 36aa07157..07dc9c67b 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -1,9 +1,10 @@
package topology
import (
- "testing"
- "github.com/chrislusf/seaweedfs/weed/sequence"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/sequence"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "testing"
)
func TestRemoveDataCenter(t *testing.T) {
@@ -26,7 +27,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
{
- volumeCount := 700
+ volumeCount := 7
var volumeMessages []*master_pb.VolumeInformationMessage
for k := 1; k <= volumeCount; k++ {
volumeMessage := &master_pb.VolumeInformationMessage{
@@ -51,7 +52,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
}
{
- volumeCount := 700 - 1
+ volumeCount := 7 - 1
var volumeMessages []*master_pb.VolumeInformationMessage
for k := 1; k <= volumeCount; k++ {
volumeMessage := &master_pb.VolumeInformationMessage{
@@ -85,3 +86,39 @@ func assert(t *testing.T, message string, actual, expected int) {
t.Fatalf("unexpected %s: %d, expected: %d", message, actual, expected)
}
}
+
+func TestAddRemoveVolume(t *testing.T) {
+
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+
+ dc := topo.GetOrCreateDataCenter("dc1")
+ rack := dc.GetOrCreateRack("rack1")
+ dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25)
+
+ v := storage.VolumeInfo{
+ Id: storage.VolumeId(1),
+ Size: 100,
+ Collection: "xcollection",
+ FileCount: 123,
+ DeleteCount: 23,
+ DeletedByteCount: 45,
+ ReadOnly: false,
+ Version: storage.CurrentVersion,
+ ReplicaPlacement: &storage.ReplicaPlacement{},
+ Ttl: storage.EMPTY_TTL,
+ }
+
+ dn.UpdateVolumes([]storage.VolumeInfo{v})
+ topo.RegisterVolumeLayout(v, dn)
+
+ if _, hasCollection := topo.FindCollection(v.Collection); !hasCollection {
+ t.Errorf("collection %v should exist", v.Collection)
+ }
+
+ topo.UnRegisterVolumeLayout(v, dn)
+
+ if _, hasCollection := topo.FindCollection(v.Collection); hasCollection {
+ t.Errorf("collection %v should not exist", v.Collection)
+ }
+
+}
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 79ed70c0f..44a25565e 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -107,6 +107,13 @@ func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
!v.ReadOnly
}
+func (vl *VolumeLayout) isEmpty() bool {
+ vl.accessLock.RLock()
+ defer vl.accessLock.RUnlock()
+
+ return len(vl.vid2location) == 0
+}
+
func (vl *VolumeLayout) Lookup(vid storage.VolumeId) []*DataNode {
vl.accessLock.RLock()
defer vl.accessLock.RUnlock()
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 2c768ab32..9edf8f59d 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -1,5 +1,5 @@
package util
const (
- VERSION = "0.91 beta"
+ VERSION = "0.92 beta"
)