aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Lu <chrislusf@users.noreply.github.com>2018-07-21 20:14:38 -0700
committerGitHub <noreply@github.com>2018-07-21 20:14:38 -0700
commit3423c1da18487e4dc3d77a024f9c0d5d3b7599cf (patch)
treecc72caa73fadbdb81659c1f13bb87f33c502fbc1
parentc98df05ed0fc78e8585c6dd7d2ae317c7c42d9c3 (diff)
parent49375d603177e4134d0cb4128324a2dd70521290 (diff)
downloadseaweedfs-3423c1da18487e4dc3d77a024f9c0d5d3b7599cf.tar.xz
seaweedfs-3423c1da18487e4dc3d77a024f9c0d5d3b7599cf.zip
Merge pull request #693 from chrislusf/add_s3
Add "weed s3" to support S3 API
-rw-r--r--weed/command/command.go1
-rw-r--r--weed/command/export.go2
-rw-r--r--weed/command/filer_copy.go12
-rw-r--r--weed/command/mount_std.go42
-rw-r--r--weed/command/s3.go76
-rw-r--r--weed/filer2/abstract_sql/abstract_sql_store.go4
-rw-r--r--weed/filer2/cassandra/cassandra_store.go2
-rw-r--r--weed/filer2/filer.go55
-rw-r--r--weed/filer2/filer_master.go4
-rw-r--r--weed/filer2/filerstore.go1
-rw-r--r--weed/filer2/memdb/memdb_store.go2
-rw-r--r--weed/filer2/memdb/memdb_store_test.go2
-rw-r--r--weed/filesys/dir.go2
-rw-r--r--weed/filesys/dir_rename.go2
-rw-r--r--weed/filesys/dirty_page.go8
-rw-r--r--weed/filesys/filehandle.go2
-rw-r--r--weed/filesys/wfs.go10
-rw-r--r--weed/operation/assign_file_id.go5
-rw-r--r--weed/operation/sync_volume.go6
-rw-r--r--weed/pb/filer.proto11
-rw-r--r--weed/pb/filer_pb/filer.pb.go199
-rw-r--r--weed/s3api/AmazonS3.xsd692
-rw-r--r--weed/s3api/README.txt7
-rw-r--r--weed/s3api/s3api_bucket_handlers.go177
-rw-r--r--weed/s3api/s3api_errors.go88
-rw-r--r--weed/s3api/s3api_handlers.go100
-rw-r--r--weed/s3api/s3api_object_handlers.go163
-rw-r--r--weed/s3api/s3api_server.go113
-rw-r--r--weed/s3api/s3api_xsd_generated.go1002
-rw-r--r--weed/server/filer_grpc_server.go12
-rw-r--r--weed/server/filer_server.go4
-rw-r--r--weed/server/filer_server_handlers_read.go4
-rw-r--r--weed/server/filer_server_handlers_write.go70
-rw-r--r--weed/server/filer_server_handlers_write_autochunk.go15
-rw-r--r--weed/server/filer_server_handlers_write_monopart.go139
-rw-r--r--weed/server/filer_server_handlers_write_multipart.go39
-rw-r--r--weed/server/raft_server_handlers.go2
-rw-r--r--weed/server/volume_grpc_client.go2
-rw-r--r--weed/server/volume_server.go2
-rw-r--r--weed/server/volume_server_handlers_sync.go2
-rw-r--r--weed/server/volume_server_handlers_write.go4
-rw-r--r--weed/storage/file_id.go10
-rw-r--r--weed/storage/needle.go12
-rw-r--r--weed/storage/needle/btree_map.go2
-rw-r--r--weed/storage/needle/compact_map.go2
-rw-r--r--weed/storage/needle/compact_map_perf_test.go8
-rw-r--r--weed/storage/needle/compact_map_test.go2
-rw-r--r--weed/storage/needle/needle_value.go2
-rw-r--r--weed/storage/needle_map.go4
-rw-r--r--weed/storage/needle_map_boltdb.go4
-rw-r--r--weed/storage/needle_map_leveldb.go2
-rw-r--r--weed/storage/needle_map_memory.go2
-rw-r--r--weed/storage/needle_map_metric.go4
-rw-r--r--weed/storage/needle_map_metric_test.go6
-rw-r--r--weed/storage/needle_parse_multipart.go6
-rw-r--r--weed/storage/needle_read_write.go32
-rw-r--r--weed/storage/needle_test.go2
-rw-r--r--weed/storage/store.go2
-rw-r--r--weed/storage/types/needle_id_type.go6
-rw-r--r--weed/storage/types/needle_types.go12
-rw-r--r--weed/storage/volume_vacuum.go2
-rw-r--r--weed/storage/volume_vacuum_test.go2
62 files changed, 2796 insertions, 413 deletions
diff --git a/weed/command/command.go b/weed/command/command.go
index c451936e5..c6b005dd9 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -16,6 +16,7 @@ var Commands = []*Command{
cmdServer,
cmdMaster,
cmdFiler,
+ cmdS3,
cmdUpload,
cmdDownload,
cmdShell,
diff --git a/weed/command/export.go b/weed/command/export.go
index b78b3f601..529ee47e3 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -125,7 +125,7 @@ func runExport(cmd *Command, args []string) bool {
t := time.Now()
tarHeader = tar.Header{Mode: 0644,
ModTime: t, Uid: os.Getuid(), Gid: os.Getgid(),
- Typeflag: tar.TypeReg,
+ Typeflag: tar.TypeReg,
AccessTime: t, ChangeTime: t}
}
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 9937bc9d6..218abf645 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -8,16 +8,16 @@ import (
"path/filepath"
"strings"
+ "context"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "path"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "io"
"net/http"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "path"
"strconv"
- "io"
"time"
- "context"
- "github.com/chrislusf/seaweedfs/weed/util"
)
var (
@@ -77,7 +77,7 @@ func runCopy(cmd *Command, args []string) bool {
return false
}
filerDestination := args[len(args)-1]
- fileOrDirs := args[0: len(args)-1]
+ fileOrDirs := args[0 : len(args)-1]
filerUrl, err := url.Parse(filerDestination)
if err != nil {
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 4905df986..242ed4dc8 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -11,8 +11,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
- "strings"
"strconv"
+ "strings"
)
func runMount(cmd *Command, args []string) bool {
@@ -53,27 +53,14 @@ func runMount(cmd *Command, args []string) bool {
c.Close()
})
- hostnameAndPort := strings.Split(*mountOptions.filer, ":")
- if len(hostnameAndPort) != 2 {
- fmt.Printf("The filer should have hostname:port format: %v\n", hostnameAndPort)
- return false
- }
-
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- fmt.Printf("The filer filer port parse error: %v\n", parseErr)
+ filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort)
+ if err != nil {
+ glog.Fatal(err)
return false
}
- filerGrpcPort := filerPort + 10000
- if *mountOptions.filerGrpcPort != 0 {
- filerGrpcPort = uint64(*copy.filerGrpcPort)
- }
-
- filerAddress := fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort)
-
err = fs.Serve(c, filesys.NewSeaweedFileSystem(
- filerAddress, *mountOptions.filerMountRootPath, *mountOptions.collection, *mountOptions.replication, int32(*mountOptions.ttlSec),
+ filerGrpcAddress, *mountOptions.filerMountRootPath, *mountOptions.collection, *mountOptions.replication, int32(*mountOptions.ttlSec),
*mountOptions.chunkSizeLimitMB, *mountOptions.dataCenter))
if err != nil {
fuse.Unmount(*mountOptions.dir)
@@ -87,3 +74,22 @@ func runMount(cmd *Command, args []string) bool {
return true
}
+
+func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) {
+ hostnameAndPort := strings.Split(filer, ":")
+ if len(hostnameAndPort) != 2 {
+ return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort)
+ }
+
+ filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return "", fmt.Errorf("The filer filer port parse error: %v", parseErr)
+ }
+
+ filerGrpcPort := int(filerPort) + 10000
+ if optionalGrpcPort != 0 {
+ filerGrpcPort = optionalGrpcPort
+ }
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
+}
diff --git a/weed/command/s3.go b/weed/command/s3.go
new file mode 100644
index 000000000..1966cb954
--- /dev/null
+++ b/weed/command/s3.go
@@ -0,0 +1,76 @@
+package command
+
+import (
+ "net/http"
+ "time"
+
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/s3api"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/gorilla/mux"
+)
+
+var (
+ s3options S3Options
+)
+
+type S3Options struct {
+ filer *string
+ filerGrpcPort *int
+ filerBucketsPath *string
+ port *int
+ domainName *string
+}
+
+func init() {
+ cmdS3.Run = runS3 // break init cycle
+ s3options.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
+ s3options.filerGrpcPort = cmdS3.Flag.Int("filer.grpcPort", 0, "filer server grpc port, default to filer http port plus 10000")
+ s3options.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/s3buckets", "folder on filer to store all buckets")
+ s3options.port = cmdS3.Flag.Int("port", 8333, "s3options server http listen port")
+ s3options.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
+}
+
+var cmdS3 = &Command{
+ UsageLine: "s3 -port=8333 -filer=<ip:port>",
+ Short: "start a s3 API compatible server that is backed by a filer",
+ Long: `start a s3 API compatible server that is backed by a filer.
+
+`,
+}
+
+func runS3(cmd *Command, args []string) bool {
+
+ filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ router := mux.NewRouter().SkipClean(true)
+
+ _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
+ Filer: *s3options.filer,
+ FilerGrpcAddress: filerGrpcAddress,
+ DomainName: *s3options.domainName,
+ BucketsPath: *s3options.filerBucketsPath,
+ })
+ if s3ApiServer_err != nil {
+ glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
+ }
+
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at port %d", util.VERSION, *s3options.port)
+ s3ApiListener, e := util.NewListener(fmt.Sprintf(":%d", *s3options.port), time.Duration(10)*time.Second)
+ if e != nil {
+ glog.Fatalf("S3 API Server listener error: %v", e)
+ }
+
+ httpS := &http.Server{Handler: router}
+ if err := httpS.Serve(s3ApiListener); err != nil {
+ glog.Fatalf("S3 API Server Fail to serve: %v", e)
+ }
+
+ return true
+
+}
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go
index 82ef571b6..5f2990475 100644
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ b/weed/filer2/abstract_sql/abstract_sql_store.go
@@ -64,7 +64,7 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
- return nil, fmt.Errorf("read entry %s: %v", fullpath, err)
+ return nil, filer2.ErrNotFound
}
entry := &filer2.Entry{
@@ -77,7 +77,7 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
return entry, nil
}
-func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) (error) {
+func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
dir, name := fullpath.DirAndName()
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go
index d731cd99c..7552cb524 100644
--- a/weed/filer2/cassandra/cassandra_store.go
+++ b/weed/filer2/cassandra/cassandra_store.go
@@ -68,7 +68,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
- return nil, fmt.Errorf("read entry %s: %v", fullpath, err)
+ return nil, filer2.ErrNotFound
}
}
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go
index a98194bc8..2deb8ffd5 100644
--- a/weed/filer2/filer.go
+++ b/weed/filer2/filer.go
@@ -4,12 +4,13 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/karlseguin/ccache"
"os"
"path/filepath"
"strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/operation"
)
type Filer struct {
@@ -112,7 +113,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
- f.deleteChunks(oldEntry)
+ f.deleteChunksIfNotNew(oldEntry, entry)
return nil
}
@@ -125,7 +126,7 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
return f.store.FindEntry(p)
}
-func (f *Filer) DeleteEntryMetaAndData(p FullPath, shouldDeleteChunks bool) (err error) {
+func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) {
entry, err := f.FindEntry(p)
if err != nil {
return err
@@ -136,14 +137,20 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, shouldDeleteChunks bool) (err
if err != nil {
return fmt.Errorf("list folder %s: %v", p, err)
}
- if len(entries) > 0 {
- return fmt.Errorf("folder %s is not empty", p)
+ if isRecursive {
+ for _, sub := range entries {
+ f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks)
+ }
+ } else {
+ if len(entries) > 0 {
+ return fmt.Errorf("folder %s is not empty", p)
+ }
}
f.cacheDelDirectory(string(p))
}
if shouldDeleteChunks {
- f.deleteChunks(entry)
+ f.deleteChunks(entry.Chunks)
}
return f.store.DeleteEntry(p)
@@ -151,14 +158,14 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, shouldDeleteChunks bool) (err
func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 {
- p = p[0: len(p)-1]
+ p = p[0 : len(p)-1]
}
return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit)
}
func (f *Filer) cacheDelDirectory(dirpath string) {
if f.directoryCache == nil {
- return
+ return
}
f.directoryCache.Delete(dirpath)
return
@@ -189,14 +196,36 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
-func (f *Filer) deleteChunks(entry *Entry) {
+func (f *Filer) deleteChunks(chunks []*filer_pb.FileChunk) {
+ for _, chunk := range chunks {
+ if err := operation.DeleteFile(f.GetMaster(), chunk.FileId, ""); err != nil {
+ glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
+ }
+ }
+}
+
+func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
- if entry == nil {
+ if oldEntry == nil {
return
}
- for _, chunk := range entry.Chunks {
- if err := operation.DeleteFile(f.GetMaster(), chunk.FileId, ""); err != nil {
- glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
+ if newEntry == nil {
+ f.deleteChunks(oldEntry.Chunks)
+ }
+
+ var toDelete []*filer_pb.FileChunk
+
+ for _, oldChunk := range oldEntry.Chunks {
+ found := false
+ for _, newChunk := range newEntry.Chunks {
+ if oldChunk.FileId == newChunk.FileId {
+ found = true
+ break
+ }
+ }
+ if !found {
+ toDelete = append(toDelete, oldChunk)
}
}
+ f.deleteChunks(toDelete)
}
diff --git a/weed/filer2/filer_master.go b/weed/filer2/filer_master.go
index 51b12c237..63c3ef452 100644
--- a/weed/filer2/filer_master.go
+++ b/weed/filer2/filer_master.go
@@ -1,12 +1,12 @@
package filer2
import (
- "fmt"
"context"
+ "fmt"
"time"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go
index b169c6f80..68fc06a5d 100644
--- a/weed/filer2/filerstore.go
+++ b/weed/filer2/filerstore.go
@@ -11,6 +11,7 @@ type FilerStore interface {
Initialize(configuration Configuration) error
InsertEntry(*Entry) error
UpdateEntry(*Entry) (err error)
+ // err == filer2.ErrNotFound if not found
FindEntry(FullPath) (entry *Entry, err error)
DeleteEntry(FullPath) (err error)
ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go
index 48a9bea31..08cee0afd 100644
--- a/weed/filer2/memdb/memdb_store.go
+++ b/weed/filer2/memdb/memdb_store.go
@@ -49,7 +49,7 @@ func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) {
func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}})
if item == nil {
- return nil, nil
+ return nil, filer2.ErrNotFound
}
entry = item.(entryItem).Entry
return entry, nil
diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go
index 84ee06ce1..cf813e04b 100644
--- a/weed/filer2/memdb/memdb_store_test.go
+++ b/weed/filer2/memdb/memdb_store_test.go
@@ -134,7 +134,7 @@ func TestCreateFileAndList(t *testing.T) {
}
// delete file and count
- filer.DeleteEntryMetaAndData(file3Path, false)
+ filer.DeleteEntryMetaAndData(file3Path, false, false)
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index b8183037b..744eeac2b 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -9,9 +9,9 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/filer2"
)
type Dir struct {
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index a89c51b31..7ba515202 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -1,9 +1,9 @@
package filesys
import (
- "context"
"bazil.org/fuse"
"bazil.org/fuse/fs"
+ "context"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"path/filepath"
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 6a7b557c2..a9307f246 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -1,14 +1,14 @@
package filesys
import (
- "fmt"
"bytes"
- "time"
"context"
+ "fmt"
+ "time"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type ContinuousDirtyPages struct {
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 03eed0a6f..0c13db984 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -9,9 +9,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "net/http"
"strings"
"sync"
- "net/http"
)
type FileHandle struct {
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 2f885d0af..6a89c654e 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -1,15 +1,15 @@
package filesys
import (
+ "bazil.org/fuse"
"bazil.org/fuse/fs"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/karlseguin/ccache"
- "sync"
- "bazil.org/fuse"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/karlseguin/ccache"
"strings"
+ "sync"
)
type WFS struct {
@@ -30,7 +30,7 @@ type WFS struct {
func NewSeaweedFileSystem(filerGrpcAddress string, filerMountRootPath string, collection string, replication string, ttlSec int32, chunkSizeLimitMB int, dataCenter string) *WFS {
if filerMountRootPath != "/" && strings.HasSuffix(filerMountRootPath, "/") {
- filerMountRootPath = filerMountRootPath[0:len(filerMountRootPath)-1]
+ filerMountRootPath = filerMountRootPath[0 : len(filerMountRootPath)-1]
}
return &WFS{
filerGrpcAddress: filerGrpcAddress,
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index c2e1e4444..169fd664d 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -59,8 +59,9 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
values.Add("dataNode", request.DataNode)
}
- jsonBlob, err := util.Post("http://"+server+"/dir/assign", values)
- glog.V(2).Infof("assign result from %s : %s", server, string(jsonBlob))
+ postUrl := fmt.Sprintf("http://%s/dir/assign", server)
+ jsonBlob, err := util.Post(postUrl, values)
+ glog.V(2).Infof("assign %d result from %s %+v : %s", i, postUrl, values, string(jsonBlob))
if err != nil {
return nil, err
}
diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go
index 5be312450..d4434b318 100644
--- a/weed/operation/sync_volume.go
+++ b/weed/operation/sync_volume.go
@@ -6,8 +6,8 @@ import (
"net/url"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type SyncVolumeResponse struct {
@@ -44,8 +44,8 @@ func GetVolumeIdxEntries(server string, vid string, eachEntryFn func(key NeedleI
line := make([]byte, NeedleEntrySize)
err := util.GetBufferStream("http://"+server+"/admin/sync/index", values, line, func(bytes []byte) {
key := BytesToNeedleId(line[:NeedleIdSize])
- offset := BytesToOffset(line[NeedleIdSize:NeedleIdSize+OffsetSize])
- size := util.BytesToUint32(line[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize])
+ offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize])
+ size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
eachEntryFn(key, offset, size)
})
if err != nil {
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index fe1fe4d8d..a01ee7a2e 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -30,6 +30,9 @@ service SeaweedFiler {
rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
}
+ rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -117,6 +120,7 @@ message DeleteEntryRequest {
string name = 2;
bool is_directory = 3;
bool is_delete_data = 4;
+ bool is_recursive = 5;
}
message DeleteEntryResponse {
@@ -152,3 +156,10 @@ message Location {
message LookupVolumeResponse {
map<string, Locations> locations_map = 1;
}
+
+message DeleteCollectionRequest {
+ string collection = 1;
+}
+
+message DeleteCollectionResponse {
+}
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 0756c6c7c..d7f279a17 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -32,6 +32,8 @@ It has these top-level messages:
Locations
Location
LookupVolumeResponse
+ DeleteCollectionRequest
+ DeleteCollectionResponse
*/
package filer_pb
@@ -452,6 +454,7 @@ type DeleteEntryRequest struct {
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
IsDirectory bool `protobuf:"varint,3,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"`
+ IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"`
}
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
@@ -487,6 +490,13 @@ func (m *DeleteEntryRequest) GetIsDeleteData() bool {
return false
}
+func (m *DeleteEntryRequest) GetIsRecursive() bool {
+ if m != nil {
+ return m.IsRecursive
+ }
+ return false
+}
+
type DeleteEntryResponse struct {
}
@@ -655,6 +665,30 @@ func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
return nil
}
+type DeleteCollectionRequest struct {
+ Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
+}
+
+func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
+func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteCollectionRequest) ProtoMessage() {}
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *DeleteCollectionRequest) GetCollection() string {
+ if m != nil {
+ return m.Collection
+ }
+ return ""
+}
+
+type DeleteCollectionResponse struct {
+}
+
+func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
+func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteCollectionResponse) ProtoMessage() {}
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
func init() {
proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse")
@@ -679,6 +713,8 @@ func init() {
proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
proto.RegisterType((*Location)(nil), "filer_pb.Location")
proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
+ proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest")
+ proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -700,6 +736,7 @@ type SeaweedFilerClient interface {
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
+ DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
}
type seaweedFilerClient struct {
@@ -782,6 +819,15 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR
return out, nil
}
+func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
+ out := new(DeleteCollectionResponse)
+ err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for SeaweedFiler service
type SeaweedFilerServer interface {
@@ -793,6 +839,7 @@ type SeaweedFilerServer interface {
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
+ DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -943,6 +990,24 @@ func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteCollectionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).DeleteCollection(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/DeleteCollection",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -979,6 +1044,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "LookupVolume",
Handler: _SeaweedFiler_LookupVolume_Handler,
},
+ {
+ MethodName: "DeleteCollection",
+ Handler: _SeaweedFiler_DeleteCollection_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "filer.proto",
@@ -987,67 +1056,71 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 990 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0xdc, 0x44,
- 0x14, 0x8e, 0xd7, 0xbb, 0x9b, 0xf5, 0xd9, 0x4d, 0x81, 0xd9, 0xb4, 0x98, 0x6d, 0x52, 0x96, 0xa1,
- 0x45, 0xa9, 0x90, 0xa2, 0x28, 0x70, 0x51, 0x81, 0x90, 0xa8, 0x92, 0x52, 0x55, 0x4a, 0x55, 0xc9,
- 0x21, 0x48, 0x5c, 0xad, 0x1c, 0xfb, 0x64, 0x19, 0xc5, 0x6b, 0x2f, 0x9e, 0x71, 0x50, 0xb8, 0xe5,
- 0x92, 0x0b, 0x9e, 0x02, 0xf1, 0x02, 0xbc, 0x01, 0x2f, 0x86, 0xe6, 0xc7, 0xde, 0xf1, 0xda, 0xdb,
- 0x9f, 0x8b, 0xde, 0xcd, 0x9c, 0x9f, 0xef, 0x7c, 0xc7, 0x3e, 0xe7, 0xb3, 0x61, 0x78, 0xc5, 0x12,
- 0xcc, 0x0f, 0x97, 0x79, 0x26, 0x32, 0x32, 0x50, 0x97, 0xd9, 0xf2, 0x92, 0xbe, 0x82, 0xfb, 0x67,
- 0x59, 0x76, 0x5d, 0x2c, 0x4f, 0x59, 0x8e, 0x91, 0xc8, 0xf2, 0xdb, 0x67, 0xa9, 0xc8, 0x6f, 0x03,
- 0xfc, 0xb5, 0x40, 0x2e, 0xc8, 0x1e, 0x78, 0x71, 0xe9, 0xf0, 0x9d, 0xa9, 0x73, 0xe0, 0x05, 0x2b,
- 0x03, 0x21, 0xd0, 0x4d, 0xc3, 0x05, 0xfa, 0x1d, 0xe5, 0x50, 0x67, 0xfa, 0x0c, 0xf6, 0xda, 0x01,
- 0xf9, 0x32, 0x4b, 0x39, 0x92, 0x47, 0xd0, 0x43, 0x69, 0x50, 0x68, 0xc3, 0xe3, 0x0f, 0x0e, 0x4b,
- 0x2a, 0x87, 0x3a, 0x4e, 0x7b, 0xe9, 0x31, 0x90, 0x33, 0xc6, 0x85, 0xb4, 0x31, 0xe4, 0x6f, 0x45,
- 0x87, 0x7e, 0x0f, 0xe3, 0x5a, 0x8e, 0xa9, 0xf8, 0x18, 0xb6, 0x51, 0x9b, 0x7c, 0x67, 0xea, 0xb6,
- 0xd5, 0x2c, 0xfd, 0xf4, 0x6f, 0x07, 0x7a, 0xca, 0x54, 0xb5, 0xe6, 0xac, 0x5a, 0x23, 0x9f, 0xc1,
- 0x88, 0xf1, 0xd9, 0x8a, 0x80, 0x6c, 0x7b, 0x10, 0x0c, 0x19, 0xaf, 0x5a, 0x25, 0x5f, 0x42, 0x3f,
- 0xfa, 0xa5, 0x48, 0xaf, 0xb9, 0xef, 0xaa, 0x52, 0xe3, 0x55, 0xa9, 0x1f, 0x58, 0x82, 0x27, 0xd2,
- 0x17, 0x98, 0x10, 0xf2, 0x04, 0x20, 0x14, 0x22, 0x67, 0x97, 0x85, 0x40, 0xee, 0x77, 0xd5, 0xf3,
- 0xf0, 0xad, 0x84, 0x82, 0xe3, 0xd3, 0xca, 0x1f, 0x58, 0xb1, 0xf4, 0x0a, 0xbc, 0x0a, 0x8e, 0x7c,
- 0x0c, 0xdb, 0x32, 0x67, 0xc6, 0x62, 0xc3, 0xb6, 0x2f, 0xaf, 0x2f, 0x62, 0x72, 0x0f, 0xfa, 0xd9,
- 0xd5, 0x15, 0x47, 0xa1, 0x98, 0xba, 0x81, 0xb9, 0xc9, 0xde, 0x38, 0xfb, 0x1d, 0x7d, 0x77, 0xea,
- 0x1c, 0x74, 0x03, 0x75, 0x26, 0xbb, 0xd0, 0x5b, 0x08, 0xb6, 0x40, 0x45, 0xc3, 0x0d, 0xf4, 0x85,
- 0xfe, 0xd9, 0x81, 0x3b, 0x75, 0x1a, 0xe4, 0x3e, 0x78, 0xaa, 0x9a, 0x42, 0x70, 0x14, 0x82, 0x9a,
- 0xa6, 0xf3, 0x1a, 0x4a, 0xc7, 0x42, 0xa9, 0x52, 0x16, 0x59, 0xac, 0x8b, 0xee, 0xe8, 0x94, 0x97,
- 0x59, 0x8c, 0xe4, 0x43, 0x70, 0x0b, 0x16, 0xab, 0xb2, 0x3b, 0x81, 0x3c, 0x4a, 0xcb, 0x9c, 0xc5,
- 0x7e, 0x4f, 0x5b, 0xe6, 0x4c, 0x35, 0x12, 0xe5, 0x0a, 0xb7, 0xaf, 0x1b, 0xd1, 0x37, 0xd9, 0xc8,
- 0x42, 0x5a, 0xb7, 0xf5, 0x4b, 0x92, 0x67, 0x32, 0x85, 0x61, 0x8e, 0xcb, 0x84, 0x45, 0xa1, 0x60,
- 0x59, 0xea, 0x0f, 0x94, 0xcb, 0x36, 0x91, 0x07, 0x00, 0x51, 0x96, 0x24, 0x18, 0xa9, 0x00, 0x4f,
- 0x05, 0x58, 0x16, 0xf9, 0x3c, 0x85, 0x48, 0x66, 0x1c, 0x23, 0x1f, 0xa6, 0xce, 0x41, 0x2f, 0xe8,
- 0x0b, 0x91, 0x9c, 0x63, 0x44, 0xe7, 0xf0, 0xc9, 0x73, 0x54, 0xe3, 0x75, 0x6b, 0xbd, 0x17, 0x33,
- 0x9a, 0x6d, 0x03, 0xb3, 0x0f, 0xb0, 0x0c, 0x73, 0x4c, 0x85, 0x1c, 0x1a, 0xb3, 0x25, 0x9e, 0xb6,
- 0x9c, 0xb2, 0xdc, 0x7e, 0x71, 0xae, 0xfd, 0xe2, 0xe8, 0x1f, 0x0e, 0x4c, 0xda, 0x2a, 0x99, 0x81,
- 0xae, 0xcf, 0x8d, 0xf3, 0xf6, 0x73, 0x63, 0x8d, 0x67, 0xe7, 0x8d, 0xe3, 0x49, 0x8f, 0xe0, 0xee,
- 0x73, 0x14, 0xca, 0x9e, 0xa5, 0x02, 0x53, 0x51, 0xb6, 0xba, 0x69, 0xe0, 0xe8, 0x31, 0xdc, 0x5b,
- 0xcf, 0x30, 0x94, 0x7d, 0xd8, 0x8e, 0xb4, 0x49, 0xa5, 0x8c, 0x82, 0xf2, 0x4a, 0x7f, 0x06, 0x72,
- 0x92, 0x63, 0x28, 0xf0, 0x1d, 0x74, 0xa7, 0xd2, 0x90, 0xce, 0x6b, 0x35, 0xe4, 0x2e, 0x8c, 0x6b,
- 0xd0, 0x9a, 0x8b, 0xac, 0x78, 0xb1, 0x8c, 0xdf, 0x57, 0xc5, 0x1a, 0xb4, 0xa9, 0xf8, 0x97, 0x03,
- 0xe4, 0x14, 0x13, 0x7c, 0xa7, 0x92, 0x2d, 0xe2, 0xda, 0x50, 0x20, 0xb7, 0xa9, 0x40, 0x0f, 0xe1,
- 0x8e, 0x0c, 0x51, 0xd5, 0x66, 0x71, 0x28, 0x42, 0xb5, 0x5a, 0x83, 0x60, 0xc4, 0xb8, 0xa6, 0x70,
- 0x1a, 0x8a, 0x50, 0x12, 0xad, 0x11, 0x32, 0x44, 0xff, 0x71, 0x60, 0xfc, 0x94, 0x73, 0x36, 0x4f,
- 0x7f, 0xca, 0x92, 0x62, 0x81, 0x25, 0xd3, 0x5d, 0xe8, 0x45, 0x59, 0x61, 0x5e, 0x5e, 0x2f, 0xd0,
- 0x97, 0xb5, 0x45, 0xea, 0x34, 0x16, 0x69, 0x6d, 0x15, 0xdd, 0xe6, 0x2a, 0x5a, 0xab, 0xd6, 0xb5,
- 0x57, 0x8d, 0x7c, 0x0a, 0x43, 0xc9, 0x7d, 0x16, 0x61, 0x2a, 0x30, 0x57, 0x5a, 0xe0, 0x05, 0x20,
- 0x4d, 0x27, 0xca, 0x42, 0x6f, 0x60, 0xb7, 0x4e, 0xd4, 0x0c, 0xda, 0x46, 0x31, 0x94, 0x3a, 0x93,
- 0x27, 0x86, 0xa5, 0x3c, 0xaa, 0xed, 0x2c, 0x2e, 0x13, 0x16, 0xcd, 0xa4, 0xc3, 0x35, 0xdb, 0xa9,
- 0x2c, 0x17, 0x79, 0xb2, 0xea, 0xb9, 0x6b, 0xf5, 0x4c, 0xbf, 0x86, 0xb1, 0xfe, 0xbc, 0xd5, 0x1f,
- 0xd0, 0x3e, 0xc0, 0x8d, 0x32, 0xcc, 0x58, 0xac, 0x3f, 0x33, 0x5e, 0xe0, 0x69, 0xcb, 0x8b, 0x98,
- 0xd3, 0xef, 0xc0, 0x3b, 0xcb, 0x74, 0xcf, 0x9c, 0x1c, 0x81, 0x97, 0x94, 0x17, 0xf3, 0x45, 0x22,
- 0xab, 0x79, 0x2a, 0xe3, 0x82, 0x55, 0x10, 0xfd, 0x16, 0x06, 0xa5, 0xb9, 0xec, 0xc3, 0xd9, 0xd4,
- 0x47, 0x67, 0xad, 0x0f, 0xfa, 0x9f, 0x03, 0xbb, 0x75, 0xca, 0xe6, 0x51, 0x5d, 0xc0, 0x4e, 0x55,
- 0x62, 0xb6, 0x08, 0x97, 0x86, 0xcb, 0x91, 0xcd, 0xa5, 0x99, 0x56, 0x11, 0xe4, 0x2f, 0xc3, 0xa5,
- 0x9e, 0x9e, 0x51, 0x62, 0x99, 0x26, 0x3f, 0xc2, 0x47, 0x8d, 0x10, 0xc9, 0xfa, 0x1a, 0xcb, 0x21,
- 0x97, 0x47, 0xf2, 0x18, 0x7a, 0x37, 0x61, 0x52, 0xa0, 0xd9, 0xa8, 0x71, 0xf3, 0x09, 0xf0, 0x40,
- 0x47, 0x7c, 0xd3, 0x79, 0xe2, 0x1c, 0xff, 0xdb, 0x83, 0xd1, 0x39, 0x86, 0xbf, 0x21, 0xc6, 0x52,
- 0x5f, 0x72, 0x32, 0x2f, 0xbb, 0xaa, 0xff, 0x67, 0x90, 0x47, 0xeb, 0xf4, 0x5b, 0x7f, 0x6c, 0x26,
- 0x5f, 0xbc, 0x29, 0xcc, 0x6c, 0xc4, 0x16, 0x39, 0x83, 0xa1, 0xf5, 0x57, 0x41, 0xf6, 0xac, 0xc4,
- 0xc6, 0x0f, 0xca, 0x64, 0x7f, 0x83, 0xb7, 0x42, 0x0b, 0x81, 0x34, 0x95, 0x9d, 0x7c, 0xbe, 0x4a,
- 0xdb, 0xf8, 0x85, 0x99, 0x3c, 0x7c, 0x7d, 0x90, 0x4d, 0xd8, 0x92, 0x3d, 0x9b, 0x70, 0x53, 0x68,
- 0x6d, 0xc2, 0x6d, 0x5a, 0xa9, 0xd0, 0x2c, 0x49, 0xb3, 0xd1, 0x9a, 0x22, 0x6a, 0xa3, 0xb5, 0xe9,
- 0xa0, 0x42, 0xb3, 0x74, 0xc7, 0x46, 0x6b, 0xea, 0xa3, 0x8d, 0xd6, 0x26, 0x56, 0x5b, 0xe4, 0x15,
- 0x8c, 0x6c, 0x11, 0x20, 0x56, 0x42, 0x8b, 0x8a, 0x4d, 0x1e, 0x6c, 0x72, 0xdb, 0x80, 0xf6, 0xcc,
- 0xdb, 0x80, 0x2d, 0x5b, 0x6f, 0x03, 0xb6, 0xad, 0x0a, 0xdd, 0xba, 0xec, 0xab, 0xff, 0xed, 0xaf,
- 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x65, 0x12, 0xc7, 0x9d, 0x7e, 0x0b, 0x00, 0x00,
+ // 1049 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6e, 0xdb, 0x46,
+ 0x10, 0x0e, 0x45, 0x4b, 0x16, 0x47, 0x72, 0x9a, 0xae, 0x9c, 0x84, 0x55, 0xec, 0x54, 0xd9, 0x26,
+ 0x85, 0x83, 0x02, 0x86, 0xe1, 0xf6, 0x90, 0xb6, 0x28, 0xd0, 0xc0, 0x4e, 0x83, 0x00, 0x0e, 0x02,
+ 0xd0, 0x75, 0x81, 0xa2, 0x07, 0x81, 0x26, 0xc7, 0xea, 0xc2, 0x14, 0xa9, 0x72, 0x97, 0x2e, 0xdc,
+ 0x6b, 0x8f, 0x3d, 0xf5, 0x21, 0x8a, 0xde, 0xfb, 0x0a, 0x7d, 0xb1, 0x62, 0x7f, 0x48, 0x2d, 0x45,
+ 0xca, 0x49, 0x0e, 0xbd, 0xed, 0xce, 0xce, 0x7c, 0x33, 0xdf, 0x72, 0xe6, 0x5b, 0x09, 0x06, 0x17,
+ 0x2c, 0xc1, 0x7c, 0x7f, 0x91, 0x67, 0x22, 0x23, 0x7d, 0xb5, 0x99, 0x2e, 0xce, 0xe9, 0x1b, 0x78,
+ 0x70, 0x92, 0x65, 0x97, 0xc5, 0xe2, 0x98, 0xe5, 0x18, 0x89, 0x2c, 0xbf, 0x7e, 0x91, 0x8a, 0xfc,
+ 0x3a, 0xc0, 0x5f, 0x0a, 0xe4, 0x82, 0xec, 0x80, 0x17, 0x97, 0x07, 0xbe, 0x33, 0x71, 0xf6, 0xbc,
+ 0x60, 0x69, 0x20, 0x04, 0x36, 0xd2, 0x70, 0x8e, 0x7e, 0x47, 0x1d, 0xa8, 0x35, 0x7d, 0x01, 0x3b,
+ 0xed, 0x80, 0x7c, 0x91, 0xa5, 0x1c, 0xc9, 0x13, 0xe8, 0xa2, 0x34, 0x28, 0xb4, 0xc1, 0xe1, 0x07,
+ 0xfb, 0x65, 0x29, 0xfb, 0xda, 0x4f, 0x9f, 0xd2, 0x43, 0x20, 0x27, 0x8c, 0x0b, 0x69, 0x63, 0xc8,
+ 0xdf, 0xa9, 0x1c, 0xfa, 0x2d, 0x8c, 0x6a, 0x31, 0x26, 0xe3, 0x53, 0xd8, 0x44, 0x6d, 0xf2, 0x9d,
+ 0x89, 0xdb, 0x96, 0xb3, 0x3c, 0xa7, 0x7f, 0x39, 0xd0, 0x55, 0xa6, 0x8a, 0x9a, 0xb3, 0xa4, 0x46,
+ 0x1e, 0xc1, 0x90, 0xf1, 0xe9, 0xb2, 0x00, 0x49, 0xbb, 0x1f, 0x0c, 0x18, 0xaf, 0xa8, 0x92, 0xcf,
+ 0xa0, 0x17, 0xfd, 0x5c, 0xa4, 0x97, 0xdc, 0x77, 0x55, 0xaa, 0xd1, 0x32, 0xd5, 0x77, 0x2c, 0xc1,
+ 0x23, 0x79, 0x16, 0x18, 0x17, 0xf2, 0x0c, 0x20, 0x14, 0x22, 0x67, 0xe7, 0x85, 0x40, 0xee, 0x6f,
+ 0xa8, 0xfb, 0xf0, 0xad, 0x80, 0x82, 0xe3, 0xf3, 0xea, 0x3c, 0xb0, 0x7c, 0xe9, 0x05, 0x78, 0x15,
+ 0x1c, 0xb9, 0x0f, 0x9b, 0x32, 0x66, 0xca, 0x62, 0x53, 0x6d, 0x4f, 0x6e, 0x5f, 0xc5, 0xe4, 0x1e,
+ 0xf4, 0xb2, 0x8b, 0x0b, 0x8e, 0x42, 0x55, 0xea, 0x06, 0x66, 0x27, 0xb9, 0x71, 0xf6, 0x1b, 0xfa,
+ 0xee, 0xc4, 0xd9, 0xdb, 0x08, 0xd4, 0x9a, 0x6c, 0x43, 0x77, 0x2e, 0xd8, 0x1c, 0x55, 0x19, 0x6e,
+ 0xa0, 0x37, 0xf4, 0x8f, 0x0e, 0xdc, 0xae, 0x97, 0x41, 0x1e, 0x80, 0xa7, 0xb2, 0x29, 0x04, 0x47,
+ 0x21, 0xa8, 0x6e, 0x3a, 0xad, 0xa1, 0x74, 0x2c, 0x94, 0x2a, 0x64, 0x9e, 0xc5, 0x3a, 0xe9, 0x96,
+ 0x0e, 0x79, 0x9d, 0xc5, 0x48, 0xee, 0x80, 0x5b, 0xb0, 0x58, 0xa5, 0xdd, 0x0a, 0xe4, 0x52, 0x5a,
+ 0x66, 0x2c, 0xf6, 0xbb, 0xda, 0x32, 0x63, 0x8a, 0x48, 0x94, 0x2b, 0xdc, 0x9e, 0x26, 0xa2, 0x77,
+ 0x92, 0xc8, 0x5c, 0x5a, 0x37, 0xf5, 0x47, 0x92, 0x6b, 0x32, 0x81, 0x41, 0x8e, 0x8b, 0x84, 0x45,
+ 0xa1, 0x60, 0x59, 0xea, 0xf7, 0xd5, 0x91, 0x6d, 0x22, 0x0f, 0x01, 0xa2, 0x2c, 0x49, 0x30, 0x52,
+ 0x0e, 0x9e, 0x72, 0xb0, 0x2c, 0xf2, 0x3e, 0x85, 0x48, 0xa6, 0x1c, 0x23, 0x1f, 0x26, 0xce, 0x5e,
+ 0x37, 0xe8, 0x09, 0x91, 0x9c, 0x62, 0x44, 0x67, 0xf0, 0xd1, 0x4b, 0x54, 0xed, 0x75, 0x6d, 0x7d,
+ 0x17, 0xd3, 0x9a, 0x6d, 0x0d, 0xb3, 0x0b, 0xb0, 0x08, 0x73, 0x4c, 0x85, 0x6c, 0x1a, 0x33, 0x25,
+ 0x9e, 0xb6, 0x1c, 0xb3, 0xdc, 0xfe, 0x70, 0xae, 0xfd, 0xe1, 0xe8, 0xef, 0x0e, 0x8c, 0xdb, 0x32,
+ 0x99, 0x86, 0xae, 0xf7, 0x8d, 0xf3, 0xee, 0x7d, 0x63, 0xb5, 0x67, 0xe7, 0xad, 0xed, 0x49, 0x0f,
+ 0xe0, 0xee, 0x4b, 0x14, 0xca, 0x9e, 0xa5, 0x02, 0x53, 0x51, 0x52, 0x5d, 0xd7, 0x70, 0xf4, 0x10,
+ 0xee, 0xad, 0x46, 0x98, 0x92, 0x7d, 0xd8, 0x8c, 0xb4, 0x49, 0x85, 0x0c, 0x83, 0x72, 0x4b, 0x7f,
+ 0x04, 0x72, 0x94, 0x63, 0x28, 0xf0, 0x3d, 0x74, 0xa7, 0xd2, 0x90, 0xce, 0x8d, 0x1a, 0x72, 0x17,
+ 0x46, 0x35, 0x68, 0x5d, 0x8b, 0xcc, 0x78, 0xb6, 0x88, 0xff, 0xaf, 0x8c, 0x35, 0x68, 0x93, 0xf1,
+ 0x1f, 0x07, 0xc8, 0x31, 0x26, 0xf8, 0x5e, 0x29, 0x5b, 0xc4, 0xb5, 0xa1, 0x40, 0x6e, 0x53, 0x81,
+ 0x1e, 0xc3, 0x6d, 0xe9, 0xa2, 0xb2, 0x4d, 0xe3, 0x50, 0x84, 0x6a, 0xb4, 0xfa, 0xc1, 0x90, 0x71,
+ 0x5d, 0xc2, 0x71, 0x28, 0x42, 0x03, 0x94, 0x63, 0x54, 0xe4, 0x9c, 0x5d, 0xa1, 0x1a, 0x36, 0x05,
+ 0x14, 0x94, 0x26, 0xc9, 0xa5, 0x56, 0xb3, 0xe1, 0xf2, 0xb7, 0x03, 0xa3, 0xe7, 0x9c, 0xb3, 0x59,
+ 0xfa, 0x43, 0x96, 0x14, 0x73, 0x2c, 0xc9, 0x6c, 0x43, 0x37, 0xca, 0x0a, 0xf3, 0x7d, 0xbb, 0x81,
+ 0xde, 0xac, 0xcc, 0x5a, 0xa7, 0x31, 0x6b, 0x2b, 0xd3, 0xea, 0x36, 0xa7, 0xd5, 0x9a, 0xc6, 0x0d,
+ 0x7b, 0x1a, 0xc9, 0xc7, 0x30, 0x90, 0xf4, 0xa6, 0x11, 0xa6, 0x02, 0x73, 0xc5, 0xc0, 0x0b, 0x40,
+ 0x9a, 0x8e, 0x94, 0x85, 0x5e, 0xc1, 0x76, 0xbd, 0x50, 0xd3, 0x8b, 0x6b, 0xf5, 0x52, 0x4a, 0x51,
+ 0x9e, 0x98, 0x2a, 0xe5, 0x52, 0x0d, 0x70, 0x71, 0x9e, 0xb0, 0x68, 0x2a, 0x0f, 0x5c, 0x33, 0xc0,
+ 0xca, 0x72, 0x96, 0x27, 0x4b, 0xce, 0x1b, 0x16, 0x67, 0xfa, 0x05, 0x8c, 0xf4, 0x0b, 0x58, 0xbf,
+ 0xa0, 0x5d, 0x80, 0x2b, 0x65, 0x98, 0xb2, 0x58, 0xbf, 0x44, 0x5e, 0xe0, 0x69, 0xcb, 0xab, 0x98,
+ 0xd3, 0x6f, 0xc0, 0x3b, 0xc9, 0x34, 0x67, 0x4e, 0x0e, 0xc0, 0x4b, 0xca, 0x8d, 0x79, 0xb4, 0xc8,
+ 0xb2, 0xe5, 0x4a, 0xbf, 0x60, 0xe9, 0x44, 0xbf, 0x86, 0x7e, 0x69, 0x2e, 0x79, 0x38, 0xeb, 0x78,
+ 0x74, 0x56, 0x78, 0xd0, 0x7f, 0x1d, 0xd8, 0xae, 0x97, 0x6c, 0xae, 0xea, 0x0c, 0xb6, 0xaa, 0x14,
+ 0xd3, 0x79, 0xb8, 0x30, 0xb5, 0x1c, 0xd8, 0xb5, 0x34, 0xc3, 0xaa, 0x02, 0xf9, 0xeb, 0x70, 0xa1,
+ 0xbb, 0x67, 0x98, 0x58, 0xa6, 0xf1, 0xf7, 0xf0, 0x61, 0xc3, 0x45, 0x56, 0x7d, 0x89, 0xe5, 0x1c,
+ 0xc8, 0x25, 0x79, 0x0a, 0xdd, 0xab, 0x30, 0x29, 0xd0, 0x0c, 0xdd, 0xa8, 0x79, 0x03, 0x3c, 0xd0,
+ 0x1e, 0x5f, 0x75, 0x9e, 0x39, 0xf4, 0x4b, 0xb8, 0xaf, 0x1b, 0xf6, 0xa8, 0xea, 0xaf, 0xf2, 0xee,
+ 0xeb, 0x6d, 0xe8, 0xac, 0xb6, 0x21, 0x1d, 0x83, 0xdf, 0x0c, 0xd5, 0x64, 0x0e, 0xff, 0xec, 0xc1,
+ 0xf0, 0x14, 0xc3, 0x5f, 0x11, 0x63, 0xa9, 0x6c, 0x39, 0x99, 0x95, 0x97, 0x55, 0xff, 0x85, 0x43,
+ 0x9e, 0xac, 0xde, 0x4a, 0xeb, 0x4f, 0xaa, 0xf1, 0xa7, 0x6f, 0x73, 0x33, 0x83, 0x76, 0x8b, 0x9c,
+ 0xc0, 0xc0, 0xfa, 0x3d, 0x43, 0x76, 0xac, 0xc0, 0xc6, 0x4f, 0xa3, 0xf1, 0xee, 0x9a, 0xd3, 0x0a,
+ 0x2d, 0x04, 0xd2, 0x7c, 0x53, 0xc8, 0x27, 0xcb, 0xb0, 0xb5, 0x6f, 0xdb, 0xf8, 0xf1, 0xcd, 0x4e,
+ 0x76, 0xc1, 0x96, 0xe0, 0xda, 0x05, 0x37, 0x25, 0xde, 0x2e, 0xb8, 0x4d, 0xa5, 0x15, 0x9a, 0x25,
+ 0xa6, 0x36, 0x5a, 0x53, 0xbe, 0x6d, 0xb4, 0x36, 0x05, 0x56, 0x68, 0x96, 0x9c, 0xd9, 0x68, 0x4d,
+ 0x65, 0xb6, 0xd1, 0xda, 0x34, 0xf0, 0x16, 0x79, 0x03, 0x43, 0x5b, 0x5b, 0x88, 0x15, 0xd0, 0x22,
+ 0x8e, 0xe3, 0x87, 0xeb, 0x8e, 0x6d, 0x40, 0x7b, 0x94, 0x6c, 0xc0, 0x16, 0x31, 0xb1, 0x01, 0xdb,
+ 0x26, 0x90, 0xde, 0x22, 0x3f, 0xc1, 0x9d, 0xd5, 0x96, 0x26, 0x8f, 0x56, 0x69, 0x35, 0x26, 0x65,
+ 0x4c, 0x6f, 0x72, 0x29, 0xc1, 0xcf, 0x7b, 0xea, 0x6f, 0xc4, 0xe7, 0xff, 0x05, 0x00, 0x00, 0xff,
+ 0xff, 0x45, 0xa6, 0x48, 0x98, 0x55, 0x0c, 0x00, 0x00,
}
diff --git a/weed/s3api/AmazonS3.xsd b/weed/s3api/AmazonS3.xsd
new file mode 100644
index 000000000..8016a6a83
--- /dev/null
+++ b/weed/s3api/AmazonS3.xsd
@@ -0,0 +1,692 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<xsd:schema
+ xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+ elementFormDefault="qualified"
+ targetNamespace="http://s3.amazonaws.com/doc/2006-03-01/">
+
+ <xsd:element name="CreateBucket">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="MetadataEntry">
+ <xsd:sequence>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="Value" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="CreateBucketResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="CreateBucketReturn" type="tns:CreateBucketResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="Status">
+ <xsd:sequence>
+ <xsd:element name="Code" type="xsd:int"/>
+ <xsd:element name="Description" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="Result">
+ <xsd:sequence>
+ <xsd:element name="Status" type="tns:Status"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="CreateBucketResult">
+ <xsd:sequence>
+ <xsd:element name="BucketName" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="DeleteBucket">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="DeleteBucketResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="DeleteBucketResponse" type="tns:Status"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="BucketLoggingStatus">
+ <xsd:sequence>
+ <xsd:element name="LoggingEnabled" type="tns:LoggingSettings" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="LoggingSettings">
+ <xsd:sequence>
+ <xsd:element name="TargetBucket" type="xsd:string"/>
+ <xsd:element name="TargetPrefix" type="xsd:string"/>
+ <xsd:element name="TargetGrants" type="tns:AccessControlList" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="GetBucketLoggingStatus">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetBucketLoggingStatusResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetBucketLoggingStatusResponse" type="tns:BucketLoggingStatus"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketLoggingStatus">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="BucketLoggingStatus" type="tns:BucketLoggingStatus"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketLoggingStatusResponse">
+ <xsd:complexType>
+ <xsd:sequence/>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetObjectAccessControlPolicyResponse" type="tns:AccessControlPolicy"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetBucketAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetBucketAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetBucketAccessControlPolicyResponse" type="tns:AccessControlPolicy"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType abstract="true" name="Grantee"/>
+
+ <xsd:complexType name="User" abstract="true">
+ <xsd:complexContent>
+ <xsd:extension base="tns:Grantee"/>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="AmazonCustomerByEmail">
+ <xsd:complexContent>
+ <xsd:extension base="tns:User">
+ <xsd:sequence>
+ <xsd:element name="EmailAddress" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="CanonicalUser">
+ <xsd:complexContent>
+ <xsd:extension base="tns:User">
+ <xsd:sequence>
+ <xsd:element name="ID" type="xsd:string"/>
+ <xsd:element name="DisplayName" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="Group">
+ <xsd:complexContent>
+ <xsd:extension base="tns:Grantee">
+ <xsd:sequence>
+ <xsd:element name="URI" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:simpleType name="Permission">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="READ"/>
+ <xsd:enumeration value="WRITE"/>
+ <xsd:enumeration value="READ_ACP"/>
+ <xsd:enumeration value="WRITE_ACP"/>
+ <xsd:enumeration value="FULL_CONTROL"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name="StorageClass">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="STANDARD"/>
+ <xsd:enumeration value="REDUCED_REDUNDANCY"/>
+ <xsd:enumeration value="GLACIER"/>
+ <xsd:enumeration value="UNKNOWN"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="Grant">
+ <xsd:sequence>
+ <xsd:element name="Grantee" type="tns:Grantee"/>
+ <xsd:element name="Permission" type="tns:Permission"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="AccessControlList">
+ <xsd:sequence>
+ <xsd:element name="Grant" type="tns:Grant" minOccurs="0" maxOccurs="100"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="CreateBucketConfiguration">
+ <xsd:sequence>
+ <xsd:element name="LocationConstraint" type="tns:LocationConstraint"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="LocationConstraint">
+ <xsd:simpleContent>
+ <xsd:extension base="xsd:string"/>
+ </xsd:simpleContent>
+ </xsd:complexType>
+
+ <xsd:complexType name="AccessControlPolicy">
+ <xsd:sequence>
+ <xsd:element name="Owner" type="tns:CanonicalUser"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="SetObjectAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetObjectAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence/>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketAccessControlPolicy">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="SetBucketAccessControlPolicyResponse">
+ <xsd:complexType>
+ <xsd:sequence/>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="GetMetadata" type="xsd:boolean"/>
+ <xsd:element name="GetData" type="xsd:boolean"/>
+ <xsd:element name="InlineData" type="xsd:boolean"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetObjectResponse" type="tns:GetObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="GetObjectResult">
+ <xsd:complexContent>
+ <xsd:extension base="tns:Result">
+ <xsd:sequence>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="Data" type="xsd:base64Binary" nillable="true"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:extension>
+ </xsd:complexContent>
+ </xsd:complexType>
+
+ <xsd:element name="GetObjectExtended">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="GetMetadata" type="xsd:boolean"/>
+ <xsd:element name="GetData" type="xsd:boolean"/>
+ <xsd:element name="InlineData" type="xsd:boolean"/>
+ <xsd:element name="ByteRangeStart" type="xsd:long" minOccurs="0"/>
+ <xsd:element name="ByteRangeEnd" type="xsd:long" minOccurs="0"/>
+ <xsd:element name="IfModifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="IfUnmodifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="IfMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="IfNoneMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="ReturnCompleteObjectOnConditionFailure" type="xsd:boolean" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="GetObjectExtendedResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="GetObjectResponse" type="tns:GetObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="PutObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="ContentLength" type="xsd:long"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="PutObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="PutObjectResponse" type="tns:PutObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="PutObjectResult">
+ <xsd:sequence>
+ <xsd:element name="ETag" type="xsd:string"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="PutObjectInline">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element minOccurs="0" maxOccurs="100" name="Metadata" type="tns:MetadataEntry"/>
+ <xsd:element name="Data" type="xsd:base64Binary"/>
+ <xsd:element name="ContentLength" type="xsd:long"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="PutObjectInlineResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="PutObjectInlineResponse" type="tns:PutObjectResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="DeleteObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="DeleteObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="DeleteObjectResponse" type="tns:Status"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListBucket">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Prefix" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Marker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="MaxKeys" type="xsd:int" minOccurs="0"/>
+ <xsd:element name="Delimiter" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListBucketResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="ListBucketResponse" type="tns:ListBucketResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListVersionsResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="ListVersionsResponse" type="tns:ListVersionsResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="ListEntry">
+ <xsd:sequence>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ <xsd:element name="Size" type="xsd:long"/>
+ <xsd:element name="Owner" type="tns:CanonicalUser" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="VersionEntry">
+ <xsd:sequence>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="VersionId" type="xsd:string"/>
+ <xsd:element name="IsLatest" type="xsd:boolean"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ <xsd:element name="Size" type="xsd:long"/>
+ <xsd:element name="Owner" type="tns:CanonicalUser" minOccurs="0"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="DeleteMarkerEntry">
+ <xsd:sequence>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="VersionId" type="xsd:string"/>
+ <xsd:element name="IsLatest" type="xsd:boolean"/>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="Owner" type="tns:CanonicalUser" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="PrefixEntry">
+ <xsd:sequence>
+ <xsd:element name="Prefix" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListBucketResult">
+ <xsd:sequence>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="Prefix" type="xsd:string"/>
+ <xsd:element name="Marker" type="xsd:string"/>
+ <xsd:element name="NextMarker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="MaxKeys" type="xsd:int"/>
+ <xsd:element name="Delimiter" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="IsTruncated" type="xsd:boolean"/>
+ <xsd:element name="Contents" type="tns:ListEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="CommonPrefixes" type="tns:PrefixEntry" minOccurs="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListVersionsResult">
+ <xsd:sequence>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="unbounded"/>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="Prefix" type="xsd:string"/>
+ <xsd:element name="KeyMarker" type="xsd:string"/>
+ <xsd:element name="VersionIdMarker" type="xsd:string"/>
+ <xsd:element name="NextKeyMarker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="NextVersionIdMarker" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="MaxKeys" type="xsd:int"/>
+ <xsd:element name="Delimiter" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="IsTruncated" type="xsd:boolean"/>
+ <xsd:choice minOccurs="0" maxOccurs="unbounded">
+ <xsd:element name="Version" type="tns:VersionEntry"/>
+ <xsd:element name="DeleteMarker" type="tns:DeleteMarkerEntry"/>
+ </xsd:choice>
+ <xsd:element name="CommonPrefixes" type="tns:PrefixEntry" minOccurs="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="ListAllMyBuckets">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="ListAllMyBucketsResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="ListAllMyBucketsResponse" type="tns:ListAllMyBucketsResult"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="ListAllMyBucketsEntry">
+ <xsd:sequence>
+ <xsd:element name="Name" type="xsd:string"/>
+ <xsd:element name="CreationDate" type="xsd:dateTime"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListAllMyBucketsResult">
+ <xsd:sequence>
+ <xsd:element name="Owner" type="tns:CanonicalUser"/>
+ <xsd:element name="Buckets" type="tns:ListAllMyBucketsList"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="ListAllMyBucketsList">
+ <xsd:sequence>
+ <xsd:element name="Bucket" type="tns:ListAllMyBucketsEntry" minOccurs="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:element name="PostResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="Location" type="xsd:anyURI"/>
+ <xsd:element name="Bucket" type="xsd:string"/>
+ <xsd:element name="Key" type="xsd:string"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:simpleType name="MetadataDirective">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="COPY"/>
+ <xsd:enumeration value="REPLACE"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:element name="CopyObject">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="SourceBucket" type="xsd:string"/>
+ <xsd:element name="SourceKey" type="xsd:string"/>
+ <xsd:element name="DestinationBucket" type="xsd:string"/>
+ <xsd:element name="DestinationKey" type="xsd:string"/>
+ <xsd:element name="MetadataDirective" type="tns:MetadataDirective" minOccurs="0"/>
+ <xsd:element name="Metadata" type="tns:MetadataEntry" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="AccessControlList" type="tns:AccessControlList" minOccurs="0"/>
+ <xsd:element name="CopySourceIfModifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="CopySourceIfUnmodifiedSince" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="CopySourceIfMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="CopySourceIfNoneMatch" type="xsd:string" minOccurs="0" maxOccurs="100"/>
+ <xsd:element name="StorageClass" type="tns:StorageClass" minOccurs="0"/>
+ <xsd:element name="AWSAccessKeyId" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Timestamp" type="xsd:dateTime" minOccurs="0"/>
+ <xsd:element name="Signature" type="xsd:string" minOccurs="0"/>
+ <xsd:element name="Credential" type="xsd:string" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="CopyObjectResponse">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="CopyObjectResult" type="tns:CopyObjectResult" />
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:complexType name="CopyObjectResult">
+ <xsd:sequence>
+ <xsd:element name="LastModified" type="xsd:dateTime"/>
+ <xsd:element name="ETag" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="RequestPaymentConfiguration">
+ <xsd:sequence>
+ <xsd:element name="Payer" type="tns:Payer" minOccurs="1" maxOccurs="1"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:simpleType name="Payer">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="BucketOwner"/>
+ <xsd:enumeration value="Requester"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="VersioningConfiguration">
+ <xsd:sequence>
+ <xsd:element name="Status" type="tns:VersioningStatus" minOccurs="0"/>
+ <xsd:element name="MfaDelete" type="tns:MfaDeleteStatus" minOccurs="0"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:simpleType name="MfaDeleteStatus">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="Enabled"/>
+ <xsd:enumeration value="Disabled"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:simpleType name="VersioningStatus">
+ <xsd:restriction base="xsd:string">
+ <xsd:enumeration value="Enabled"/>
+ <xsd:enumeration value="Suspended"/>
+ </xsd:restriction>
+ </xsd:simpleType>
+
+ <xsd:complexType name="NotificationConfiguration">
+ <xsd:sequence>
+ <xsd:element name="TopicConfiguration" minOccurs="0" maxOccurs="unbounded" type="tns:TopicConfiguration"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+ <xsd:complexType name="TopicConfiguration">
+ <xsd:sequence>
+ <xsd:element name="Topic" minOccurs="1" maxOccurs="1" type="xsd:string"/>
+ <xsd:element name="Event" minOccurs="1" maxOccurs="unbounded" type="xsd:string"/>
+ </xsd:sequence>
+ </xsd:complexType>
+
+</xsd:schema> \ No newline at end of file
diff --git a/weed/s3api/README.txt b/weed/s3api/README.txt
new file mode 100644
index 000000000..10a18ff4d
--- /dev/null
+++ b/weed/s3api/README.txt
@@ -0,0 +1,7 @@
+see https://blog.aqwari.net/xml-schema-go/
+
+1. go get aqwari.net/xml/cmd/xsdgen
+2. xsdgen -o s3api_xsd_generated.go -pkg s3api AmazonS3.xsd
+
+
+
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
new file mode 100644
index 000000000..af05d0a93
--- /dev/null
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -0,0 +1,177 @@
+package s3api
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/gorilla/mux"
+ "net/http"
+ "os"
+ "time"
+)
+
// Process owner credentials captured once at startup; bucket directories
// created through the S3 API are owned by this uid/gid on the filer.
var (
	OS_UID = uint32(os.Getuid())
	OS_GID = uint32(os.Getgid())
)
+
+func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+
+ var response ListAllMyBucketsResponse
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.ListEntriesRequest{
+ Directory: s3a.option.BucketsPath,
+ }
+
+ glog.V(4).Infof("read directory: %v", request)
+ resp, err := client.ListEntries(context.Background(), request)
+ if err != nil {
+ return fmt.Errorf("list buckets: %v", err)
+ }
+
+ var buckets []ListAllMyBucketsEntry
+ for _, entry := range resp.Entries {
+ if entry.IsDirectory {
+ buckets = append(buckets, ListAllMyBucketsEntry{
+ Name: entry.Name,
+ CreationDate: time.Unix(entry.Attributes.Crtime, 0),
+ })
+ }
+ }
+
+ response = ListAllMyBucketsResponse{
+ ListAllMyBucketsResponse: ListAllMyBucketsResult{
+ Owner: CanonicalUser{
+ ID: "",
+ DisplayName: "",
+ },
+ Buckets: ListAllMyBucketsList{
+ Bucket: buckets,
+ },
+ },
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseXML(w, encodeResponse(response))
+}
+
+func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.CreateEntryRequest{
+ Directory: s3a.option.BucketsPath,
+ Entry: &filer_pb.Entry{
+ Name: bucket,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ },
+ }
+
+ glog.V(1).Infof("create bucket: %v", request)
+ if _, err := client.CreateEntry(context.Background(), request); err != nil {
+ return fmt.Errorf("mkdir %s/%s: %v", s3a.option.BucketsPath, bucket, err)
+ }
+
+ // lazily create collection
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseEmpty(w)
+}
+
// DeleteBucketHandler implements S3 DeleteBucket (DELETE /{bucket}).
// It first drops the volume collection that stores the bucket's file
// content, then removes the bucket's directory entry from the filer
// metadata. Any failure is reported as InternalError; success is 204.
func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		ctx := context.Background()

		// delete collection
		deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
			Collection: bucket,
		}

		glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
		if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil {
			return fmt.Errorf("delete collection %s: %v", bucket, err)
		}

		// delete bucket metadata
		// IsDeleteData is false because the collection delete above already
		// reclaimed the file content; only the directory tree remains.
		request := &filer_pb.DeleteEntryRequest{
			Directory:    s3a.option.BucketsPath,
			Name:         bucket,
			IsDirectory:  true,
			IsDeleteData: false,
			IsRecursive:  true,
		}

		glog.V(1).Infof("delete bucket: %v", request)
		if _, err := client.DeleteEntry(ctx, request); err != nil {
			return fmt.Errorf("delete bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
		}

		return nil
	})

	if err != nil {
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	writeResponse(w, http.StatusNoContent, nil, mimeNone)
}
+
+func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.LookupDirectoryEntryRequest{
+ Directory: s3a.option.BucketsPath,
+ Name: bucket,
+ }
+
+ glog.V(1).Infof("lookup bucket: %v", request)
+ if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil {
+ return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ writeErrorResponse(w, ErrNoSuchBucket, r.URL)
+ return
+ }
+
+ writeSuccessResponseEmpty(w)
+}
diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go
new file mode 100644
index 000000000..771b1dd82
--- /dev/null
+++ b/weed/s3api/s3api_errors.go
@@ -0,0 +1,88 @@
+package s3api
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
// APIError structure: a single S3 error definition — the wire-level error
// code string, its human-readable description, and the HTTP status to
// return with it.
type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}
+
// RESTErrorResponse - error response format, matching the XML error body
// that AWS S3 returns (<Error><Code>...<Message>...</Error>).
type RESTErrorResponse struct {
	XMLName   xml.Name `xml:"Error" json:"-"`
	Code      string   `xml:"Code" json:"Code"`
	Message   string   `xml:"Message" json:"Message"`
	Resource  string   `xml:"Resource" json:"Resource"`
	RequestID string   `xml:"RequestId" json:"RequestId"`
}
+
// ErrorCode type of error status.
type ErrorCode int

// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
// ErrNone (the zero value) means "no error" and has no entry in the
// response table below.
const (
	ErrNone ErrorCode = iota
	ErrMethodNotAllowed
	ErrBucketNotEmpty
	ErrBucketAlreadyExists
	ErrBucketAlreadyOwnedByYou
	ErrNoSuchBucket
	ErrInvalidBucketName
	ErrInvalidDigest
	ErrInternalError
)
+
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
// ErrNone intentionally has no entry: getAPIError returns the zero
// APIError for any code missing from this table.
var errorCodeResponse = map[ErrorCode]APIError{
	ErrMethodNotAllowed: {
		Code:           "MethodNotAllowed",
		Description:    "The specified method is not allowed against this resource.",
		HTTPStatusCode: http.StatusMethodNotAllowed,
	},
	ErrBucketNotEmpty: {
		Code:           "BucketNotEmpty",
		Description:    "The bucket you tried to delete is not empty",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrBucketAlreadyExists: {
		Code:           "BucketAlreadyExists",
		Description:    "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrBucketAlreadyOwnedByYou: {
		Code:           "BucketAlreadyOwnedByYou",
		Description:    "Your previous request to create the named bucket succeeded and you already own it.",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrInvalidBucketName: {
		Code:           "InvalidBucketName",
		Description:    "The specified bucket is not valid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidDigest: {
		Code:           "InvalidDigest",
		Description:    "The Content-Md5 you specified is not valid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrNoSuchBucket: {
		Code:           "NoSuchBucket",
		Description:    "The specified bucket does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrInternalError: {
		Code:           "InternalError",
		Description:    "We encountered an internal error, please try again.",
		HTTPStatusCode: http.StatusInternalServerError,
	},
}
+
// getAPIError provides API Error for input API error code.
// Codes without a table entry (including ErrNone) yield the zero APIError.
func getAPIError(code ErrorCode) APIError {
	return errorCodeResponse[code]
}
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go
new file mode 100644
index 000000000..ab1fc7038
--- /dev/null
+++ b/weed/s3api/s3api_handlers.go
@@ -0,0 +1,100 @@
+package s3api
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "net/http"
+ "net/url"
+ "time"
+)
+
// mimeType is the Content-Type value attached to S3 API responses.
type mimeType string

const (
	// mimeNone suppresses the Content-Type header entirely.
	mimeNone mimeType = ""
	mimeJSON mimeType = "application/json"
	mimeXML  mimeType = "application/xml"
)
+
+func setCommonHeaders(w http.ResponseWriter) {
+ w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
+ w.Header().Set("Accept-Ranges", "bytes")
+}
+
// encodeResponse serializes response as an XML document prefixed with the
// standard XML header. Encoding errors are ignored; a truncated buffer may
// be returned in that case.
func encodeResponse(response interface{}) []byte {
	var buf bytes.Buffer
	buf.WriteString(xml.Header)
	xml.NewEncoder(&buf).Encode(response)
	return buf.Bytes()
}
+
// withFilerClient dials the filer's gRPC endpoint, invokes fn with a
// connected client, and closes the connection when fn returns.
// NOTE(review): a fresh connection is dialed for every call; consider
// caching the connection if this shows up under load.
func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {

	grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress)
	if err != nil {
		return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err)
	}
	defer grpcConnection.Close()

	client := filer_pb.NewSeaweedFilerClient(grpcConnection)

	return fn(client)
}
+
// notFoundHandler is installed as the router's NotFoundHandler.
// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
	writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
}
+
+func writeErrorResponse(w http.ResponseWriter, errorCode ErrorCode, reqURL *url.URL) {
+ apiError := getAPIError(errorCode)
+ errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
+ encodedErrorResponse := encodeResponse(errorResponse)
+ writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
+}
+
// getRESTErrorResponse converts an APIError into the XML error body that
// S3 clients expect. resource is the request path; the request id is a
// nanosecond timestamp rather than a tracked identifier.
func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse {
	return RESTErrorResponse{
		Code:      err.Code,
		Message:   err.Description,
		Resource:  resource,
		RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
	}
}
+
+func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
+ setCommonHeaders(w)
+ if mType != mimeNone {
+ w.Header().Set("Content-Type", string(mType))
+ }
+ w.WriteHeader(statusCode)
+ if response != nil {
+ w.Write(response)
+ w.(http.Flusher).Flush()
+ }
+}
+
// writeSuccessResponseXML sends a 200 OK with an XML body.
func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
	writeResponse(w, http.StatusOK, response, mimeXML)
}
+
// writeSuccessResponseEmpty sends a 200 OK with no body and no Content-Type.
func writeSuccessResponseEmpty(w http.ResponseWriter) {
	writeResponse(w, http.StatusOK, nil, mimeNone)
}
+
+func validateContentMd5(h http.Header) ([]byte, error) {
+ md5B64, ok := h["Content-Md5"]
+ if ok {
+ if md5B64[0] == "" {
+ return nil, fmt.Errorf("Content-Md5 header set to empty value")
+ }
+ return base64.StdEncoding.DecodeString(md5B64[0])
+ }
+ return []byte{}, nil
+}
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
new file mode 100644
index 000000000..6c1895cbc
--- /dev/null
+++ b/weed/s3api/s3api_object_handlers.go
@@ -0,0 +1,163 @@
+package s3api
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/gorilla/mux"
+ "io/ioutil"
+ "net/http"
+ "io"
+)
+
var (
	// client is the shared HTTP client used to proxy object traffic to the
	// filer; shared so keep-alive connections are reused across requests.
	client *http.Client
)

// init raises the idle-connection cap well above the default because all
// proxied traffic targets the single filer host.
func init() {
	client = &http.Client{Transport: &http.Transport{
		MaxIdleConnsPerHost: 1024,
	}}
}
+
// UploadResult mirrors the JSON body returned by the filer's upload
// endpoint; a non-empty Error field indicates the write failed.
type UploadResult struct {
	Name  string `json:"name,omitempty"`
	Size  uint32 `json:"size,omitempty"`
	Error string `json:"error,omitempty"`
}
+
+func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+ object := vars["object"]
+
+ _, err := validateContentMd5(r.Header)
+ if err != nil {
+ writeErrorResponse(w, ErrInvalidDigest, r.URL)
+ return
+ }
+
+ uploadUrl := fmt.Sprintf("http://%s%s/%s/%s?collection=%s",
+ s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket)
+ proxyReq, err := http.NewRequest("PUT", uploadUrl, r.Body)
+
+ if err != nil {
+ glog.Errorf("NewRequest %s: %v", uploadUrl, err)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ proxyReq.Header.Set("Host", s3a.option.Filer)
+ proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+
+ for header, values := range r.Header {
+ for _, value := range values {
+ proxyReq.Header.Add(header, value)
+ }
+ }
+
+ resp, postErr := client.Do(proxyReq)
+
+ if postErr != nil {
+ glog.Errorf("post to filer: %v", postErr)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ defer resp.Body.Close()
+
+ resp_body, ra_err := ioutil.ReadAll(resp.Body)
+ if ra_err != nil {
+ glog.Errorf("upload to filer response read: %v", ra_err)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ var ret UploadResult
+ unmarshal_err := json.Unmarshal(resp_body, &ret)
+ if unmarshal_err != nil {
+ glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ if ret.Error != "" {
+ glog.Errorf("upload to filer error: %v", ret.Error)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ writeSuccessResponseEmpty(w)
+}
+
// GetObjectHandler implements S3 GetObject by proxying the request,
// unchanged, to the filer and streaming its response straight back.
func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {

	destUrl := fmt.Sprintf("http://%s%s%s",
		s3a.option.Filer, s3a.option.BucketsPath, r.RequestURI)

	s3a.proxyToFiler(w, r, destUrl, passThroghResponse)

}
+
// HeadObjectHandler implements S3 HeadObject by proxying the HEAD request
// to the filer and passing its response (headers and status) through.
func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {

	destUrl := fmt.Sprintf("http://%s%s%s",
		s3a.option.Filer, s3a.option.BucketsPath, r.RequestURI)

	s3a.proxyToFiler(w, r, destUrl, passThroghResponse)

}
+
+func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
+
+ destUrl := fmt.Sprintf("http://%s%s%s",
+ s3a.option.Filer, s3a.option.BucketsPath, r.RequestURI)
+
+ s3a.proxyToFiler(w, r, destUrl, func(proxyResonse *http.Response, w http.ResponseWriter) {
+ for k, v := range proxyResonse.Header {
+ w.Header()[k] = v
+ }
+ w.WriteHeader(http.StatusNoContent)
+ })
+
+}
+
+func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) {
+
+ glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
+
+ proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)
+
+ if err != nil {
+ glog.Errorf("NewRequest %s: %v", destUrl, err)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ proxyReq.Header.Set("Host", s3a.option.Filer)
+ proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
+
+ for header, values := range r.Header {
+ for _, value := range values {
+ proxyReq.Header.Add(header, value)
+ }
+ }
+
+ resp, postErr := client.Do(proxyReq)
+
+ if postErr != nil {
+ glog.Errorf("post to filer: %v", postErr)
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ defer resp.Body.Close()
+
+ responseFn(resp, w)
+}
// passThroghResponse copies the proxied response — headers, status code,
// then body — verbatim to the client. The io.Copy error is ignored: once
// the body is streaming, the status line has already been written.
// NOTE(review): name has a typo ("Throgh"); renaming would touch callers.
func passThroghResponse(proxyResonse *http.Response, w http.ResponseWriter) {
	for k, v := range proxyResonse.Header {
		w.Header()[k] = v
	}
	w.WriteHeader(proxyResonse.StatusCode)
	io.Copy(w, proxyResonse.Body)
}
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
new file mode 100644
index 000000000..a4bb6b32d
--- /dev/null
+++ b/weed/s3api/s3api_server.go
@@ -0,0 +1,113 @@
+package s3api
+
+import (
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
+ "github.com/gorilla/mux"
+ "net/http"
+)
+
// S3ApiServerOption configures an S3ApiServer.
type S3ApiServerOption struct {
	Filer            string // filer HTTP host:port; target of proxied object traffic
	FilerGrpcAddress string // filer gRPC host:port; used for metadata operations
	DomainName       string // optional domain enabling virtual-hosted-style bucket URLs
	BucketsPath      string // filer directory under which each bucket is a sub-directory
}
+
// S3ApiServer serves a subset of the Amazon S3 API backed by a SeaweedFS filer.
type S3ApiServer struct {
	option *S3ApiServerOption
}
+
+func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) {
+ s3ApiServer = &S3ApiServer{
+ option: option,
+ }
+
+ s3ApiServer.registerRouter(router)
+
+ return s3ApiServer, nil
+}
+
// registerRouter mounts all implemented S3 API routes on router.
// Registration order is load-bearing: object-level routes (with an
// {object:.+} path) are registered before bucket-level routes so the more
// specific pattern matches first; ListBuckets and the NotFound handler go
// on the parent router last.
func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
	// API Router
	apiRouter := router.PathPrefix("/").Subrouter()
	var routers []*mux.Router
	if s3a.option.DomainName != "" {
		// virtual-hosted-style requests: bucket name carried in the Host header
		routers = append(routers, apiRouter.Host("{bucket:.+}." + s3a.option.DomainName).Subrouter())
	}
	// path-style requests: bucket name as the first path segment
	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

	for _, bucket := range routers {

		// PutObject
		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler)
		// GetObject
		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler)
		// HeadObject
		bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler)
		// DeleteObject
		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler)

		// PutBucket
		bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler)
		// DeleteBucket
		bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler)
		// HeadBucket
		bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler)

		// The routes below are not implemented yet; kept as a roadmap.
		/*
			// CopyObject
			bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler)

			// CopyObjectPart
			bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
			// PutObjectPart
			bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
			// ListObjectParts
			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
			// CompleteMultipartUpload
			bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
			// NewMultipartUpload
			bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "")
			// AbortMultipartUpload
			bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")

			// ListMultipartUploads
			bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "")
			// ListObjectsV2
			bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2")
			// ListObjectsV1 (Legacy)
			bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler)
			// DeleteMultipleObjects
			bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "")

			// not implemented
			// GetBucketLocation
			bucket.Methods("GET").HandlerFunc(s3a.GetBucketLocationHandler).Queries("location", "")
			// GetBucketPolicy
			bucket.Methods("GET").HandlerFunc(s3a.GetBucketPolicyHandler).Queries("policy", "")
			// GetObjectACL
			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectACLHandler).Queries("acl", "")
			// GetBucketACL
			bucket.Methods("GET").HandlerFunc(s3a.GetBucketACLHandler).Queries("acl", "")
			// PutBucketPolicy
			bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
			// DeleteBucketPolicy
			bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "")
			// PostPolicy
			bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(s3a.PostPolicyBucketHandler)
		*/

	}

	// ListBuckets
	apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler)

	// NotFound
	apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)

}
diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go
new file mode 100644
index 000000000..915b74ec4
--- /dev/null
+++ b/weed/s3api/s3api_xsd_generated.go
@@ -0,0 +1,1002 @@
+package s3api
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "time"
+)
+
+// NOTE(review): this file appears to be generated from AmazonS3.xsd (see the
+// sibling README.txt / AmazonS3.xsd in this commit) — avoid hand-editing; the
+// xsdDateTime / xsdBase64Binary helper types referenced below are presumably
+// defined later in this generated file.
+
+// AccessControlList is the S3 <AccessControlList> element: the set of grants
+// attached to a bucket or object ACL.
+type AccessControlList struct {
+ Grant []Grant `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grant,omitempty"`
+}
+
+// AccessControlPolicy pairs an owner with its access-control list, as in the
+// S3 <AccessControlPolicy> document.
+type AccessControlPolicy struct {
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
+}
+
+// AmazonCustomerByEmail identifies an AWS account grantee by e-mail address.
+type AmazonCustomerByEmail struct {
+ EmailAddress string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ EmailAddress"`
+}
+
+// BucketLoggingStatus carries a bucket's server-access-logging configuration.
+type BucketLoggingStatus struct {
+ LoggingEnabled LoggingSettings `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LoggingEnabled,omitempty"`
+}
+
+// CanonicalUser identifies an S3 user by canonical ID with an optional
+// display name.
+type CanonicalUser struct {
+ ID string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ID"`
+ DisplayName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DisplayName,omitempty"`
+}
+
+// CopyObject is the SOAP-style CopyObject request body, including copy-source
+// preconditions and request authentication fields.
+type CopyObject struct {
+ SourceBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceBucket"`
+ SourceKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceKey"`
+ DestinationBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationBucket"`
+ DestinationKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationKey"`
+ MetadataDirective MetadataDirective `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MetadataDirective,omitempty"`
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ CopySourceIfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
+ CopySourceIfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfMatch,omitempty"`
+ CopySourceIfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfNoneMatch,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler. It overlays the struct so that the
+// time.Time fields are encoded through the xsdDateTime wrapper (generated
+// helper, defined elsewhere in this file) instead of the default encoding.
+func (t *CopyObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ // Local alias strips the custom Marshal/Unmarshal methods to avoid
+ // infinite recursion when the overlay is encoded.
+ type T CopyObject
+ var layout struct {
+ *T
+ CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.CopySourceIfModifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfModifiedSince)
+ layout.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfUnmodifiedSince)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; it is the decoding mirror of
+// MarshalXML, routing the dateTime fields through xsdDateTime.
+func (t *CopyObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T CopyObject
+ var overlay struct {
+ *T
+ CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"`
+ CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.CopySourceIfModifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfModifiedSince)
+ overlay.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfUnmodifiedSince)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// CopyObjectResponse wraps the CopyObject result element.
+type CopyObjectResponse struct {
+ CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"`
+}
+
+// CopyObjectResult reports the new object's modification time and ETag.
+type CopyObjectResult struct {
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding LastModified via the
+// generated xsdDateTime wrapper.
+func (t *CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T CopyObjectResult
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *CopyObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T CopyObjectResult
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// CreateBucket is the SOAP-style CreateBucket request body.
+type CreateBucket struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *CreateBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T CreateBucket
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *CreateBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T CreateBucket
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// CreateBucketConfiguration carries the optional bucket-location constraint.
+type CreateBucketConfiguration struct {
+ LocationConstraint string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"`
+}
+
+// CreateBucketResponse wraps the CreateBucket result element.
+type CreateBucketResponse struct {
+ CreateBucketReturn CreateBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketReturn"`
+}
+
+// CreateBucketResult reports the name of the bucket that was created.
+type CreateBucketResult struct {
+ BucketName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketName"`
+}
+
+// DeleteBucket is the SOAP-style DeleteBucket request body.
+type DeleteBucket struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via the generated
+// xsdDateTime wrapper.
+func (t *DeleteBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T DeleteBucket
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *DeleteBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T DeleteBucket
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// DeleteBucketResponse wraps the DeleteBucket status element. The field
+// sharing the struct's name mirrors the XSD element naming (generated code).
+type DeleteBucketResponse struct {
+ DeleteBucketResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteBucketResponse"`
+}
+
+// DeleteMarkerEntry describes a delete-marker version in a versioned-bucket
+// listing.
+type DeleteMarkerEntry struct {
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"`
+ IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding LastModified via xsdDateTime.
+func (t *DeleteMarkerEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T DeleteMarkerEntry
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *DeleteMarkerEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T DeleteMarkerEntry
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// DeleteObject is the SOAP-style DeleteObject request body.
+type DeleteObject struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *DeleteObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T DeleteObject
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *DeleteObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T DeleteObject
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// DeleteObjectResponse wraps the DeleteObject status element.
+type DeleteObjectResponse struct {
+ DeleteObjectResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteObjectResponse"`
+}
+
+// GetBucketAccessControlPolicy is the SOAP-style request for a bucket's ACL.
+type GetBucketAccessControlPolicy struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *GetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetBucketAccessControlPolicy
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *GetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetBucketAccessControlPolicy
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// GetBucketAccessControlPolicyResponse wraps the returned ACL policy.
+type GetBucketAccessControlPolicyResponse struct {
+ GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketAccessControlPolicyResponse"`
+}
+
+// GetBucketLoggingStatus is the SOAP-style request for a bucket's logging
+// configuration.
+type GetBucketLoggingStatus struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *GetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetBucketLoggingStatus
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *GetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetBucketLoggingStatus
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// GetBucketLoggingStatusResponse wraps the returned logging status.
+type GetBucketLoggingStatusResponse struct {
+ GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketLoggingStatusResponse"`
+}
+
+// GetObject is the SOAP-style GetObject request body; the Get* booleans
+// select which parts of the object (metadata, data) are returned.
+type GetObject struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"`
+ GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"`
+ InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *GetObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObject
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *GetObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObject
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// GetObjectAccessControlPolicy is the SOAP-style request for an object's ACL.
+type GetObjectAccessControlPolicy struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *GetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObjectAccessControlPolicy
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *GetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObjectAccessControlPolicy
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// GetObjectAccessControlPolicyResponse wraps the returned ACL policy.
+type GetObjectAccessControlPolicyResponse struct {
+ GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectAccessControlPolicyResponse"`
+}
+
+// GetObjectExtended is the SOAP-style conditional/ranged GetObject request:
+// byte-range selection plus If-(None-)Match / (Un)Modified-Since
+// preconditions.
+type GetObjectExtended struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"`
+ GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"`
+ InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"`
+ ByteRangeStart int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeStart,omitempty"`
+ ByteRangeEnd int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeEnd,omitempty"`
+ IfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
+ IfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
+ IfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfMatch,omitempty"`
+ IfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfNoneMatch,omitempty"`
+ ReturnCompleteObjectOnConditionFailure bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ReturnCompleteObjectOnConditionFailure,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding the three time.Time fields
+// via the generated xsdDateTime wrapper.
+func (t *GetObjectExtended) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObjectExtended
+ var layout struct {
+ *T
+ IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.IfModifiedSince = (*xsdDateTime)(&layout.T.IfModifiedSince)
+ layout.IfUnmodifiedSince = (*xsdDateTime)(&layout.T.IfUnmodifiedSince)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *GetObjectExtended) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObjectExtended
+ var overlay struct {
+ *T
+ IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"`
+ IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"`
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.IfModifiedSince = (*xsdDateTime)(&overlay.T.IfModifiedSince)
+ overlay.IfUnmodifiedSince = (*xsdDateTime)(&overlay.T.IfUnmodifiedSince)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// GetObjectExtendedResponse wraps the result of GetObjectExtended.
+type GetObjectExtendedResponse struct {
+ GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"`
+}
+
+// GetObjectResponse wraps the result of GetObject.
+type GetObjectResponse struct {
+ GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"`
+}
+
+// GetObjectResult carries the returned object: optional metadata, optional
+// inline data, modification time, ETag, and request status.
+type GetObjectResult struct {
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"`
+}
+
+// MarshalXML implements xml.Marshaler: Data is encoded via the generated
+// xsdBase64Binary wrapper and LastModified via xsdDateTime.
+func (t *GetObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T GetObjectResult
+ var layout struct {
+ *T
+ Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.Data = (*xsdBase64Binary)(&layout.T.Data)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *GetObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T GetObjectResult
+ var overlay struct {
+ *T
+ Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"`
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// Grant pairs a grantee with the permission granted to it.
+type Grant struct {
+ Grantee Grantee `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grantee"`
+ Permission Permission `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Permission"`
+}
+
+// Grantee is an empty placeholder generated for the XSD's abstract Grantee
+// type (concrete grantees — user, group, e-mail — presumably use the sibling
+// types; TODO confirm against the XSD).
+type Grantee struct {
+}
+
+// Group identifies a grantee group by URI.
+type Group struct {
+ URI string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ URI"`
+}
+
+// ListAllMyBuckets is the SOAP-style request to list the caller's buckets.
+type ListAllMyBuckets struct {
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *ListAllMyBuckets) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListAllMyBuckets
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListAllMyBuckets
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// ListAllMyBucketsEntry is one bucket (name + creation time) in a
+// ListAllMyBuckets result.
+type ListAllMyBucketsEntry struct {
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ CreationDate time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding CreationDate via xsdDateTime.
+func (t *ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListAllMyBucketsEntry
+ var layout struct {
+ *T
+ CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ }
+ layout.T = (*T)(t)
+ layout.CreationDate = (*xsdDateTime)(&layout.T.CreationDate)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListAllMyBucketsEntry
+ var overlay struct {
+ *T
+ CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"`
+ }
+ overlay.T = (*T)(t)
+ overlay.CreationDate = (*xsdDateTime)(&overlay.T.CreationDate)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// ListAllMyBucketsList is the <Buckets> container element.
+type ListAllMyBucketsList struct {
+ Bucket []ListAllMyBucketsEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket,omitempty"`
+}
+
+// ListAllMyBucketsResponse wraps the ListAllMyBuckets result element.
+type ListAllMyBucketsResponse struct {
+ ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResponse"`
+}
+
+// ListAllMyBucketsResult is the owner plus the bucket list.
+type ListAllMyBucketsResult struct {
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner"`
+ Buckets ListAllMyBucketsList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Buckets"`
+}
+
+// ListBucket is the SOAP-style bucket-listing request (prefix/marker/
+// delimiter pagination parameters plus request authentication).
+type ListBucket struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix,omitempty"`
+ Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker,omitempty"`
+ MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys,omitempty"`
+ Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *ListBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListBucket
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *ListBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListBucket
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// ListBucketResponse wraps the ListBucket result element.
+type ListBucketResponse struct {
+ ListBucketResponse ListBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResponse"`
+}
+
+// ListBucketResult is the bucket-listing payload: echo of the request
+// parameters, truncation flag, object entries, and common prefixes.
+type ListBucketResult struct {
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+ Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker"`
+ NextMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextMarker,omitempty"`
+ MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"`
+ Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
+ IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"`
+ Contents []ListEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Contents,omitempty"`
+ CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"`
+}
+
+// ListEntry is one object in a bucket listing.
+type ListEntry struct {
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+ Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"`
+ Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding LastModified via xsdDateTime.
+func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T ListEntry
+ var layout struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ layout.T = (*T)(t)
+ layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T ListEntry
+ var overlay struct {
+ *T
+ LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
+ }
+ overlay.T = (*T)(t)
+ overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
+ return d.DecodeElement(&overlay, &start)
+}
+
+// ListVersionsResponse wraps the versioned-listing result element.
+type ListVersionsResponse struct {
+ ListVersionsResponse ListVersionsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResponse"`
+}
+
+// ListVersionsResult is the versioned-bucket listing payload: key/version
+// markers for pagination plus version and delete-marker entries.
+type ListVersionsResult struct {
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+ KeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ KeyMarker"`
+ VersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionIdMarker"`
+ NextKeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextKeyMarker,omitempty"`
+ NextVersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextVersionIdMarker,omitempty"`
+ MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"`
+ Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"`
+ IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"`
+ Version VersionEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version"`
+ DeleteMarker DeleteMarkerEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker"`
+ CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"`
+}
+
+// LoggingSettings configures where server-access logs are delivered and who
+// may read them.
+type LoggingSettings struct {
+ TargetBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetBucket"`
+ TargetPrefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetPrefix"`
+ TargetGrants AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetGrants,omitempty"`
+}
+
+// May be one of COPY, REPLACE
+type MetadataDirective string
+
+// MetadataEntry is a single user-metadata name/value pair.
+type MetadataEntry struct {
+ Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"`
+ Value string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Value"`
+}
+
+// May be one of Enabled, Disabled
+type MfaDeleteStatus string
+
+// NotificationConfiguration is a bucket's event-notification topic list.
+type NotificationConfiguration struct {
+ TopicConfiguration []TopicConfiguration `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TopicConfiguration,omitempty"`
+}
+
+// May be one of BucketOwner, Requester
+type Payer string
+
+// May be one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL
+type Permission string
+
+// PostResponse is the result of a browser-based POST upload.
+type PostResponse struct {
+ Location string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Location"`
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
+}
+
+// PrefixEntry is one <CommonPrefixes> entry in a listing.
+type PrefixEntry struct {
+ Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+}
+
+// PutObject is the SOAP-style PutObject request body (data travels outside
+// this element; compare PutObjectInline).
+type PutObject struct {
+ Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
+ Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
+ Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
+ ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"`
+ AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
+ StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
+ AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
+ Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
+ Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
+}
+
+// MarshalXML implements xml.Marshaler, encoding Timestamp via xsdDateTime.
+func (t *PutObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type T PutObject
+ var layout struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ layout.T = (*T)(t)
+ layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
+ return e.EncodeElement(layout, start)
+}
+
+// UnmarshalXML implements xml.Unmarshaler; decoding mirror of MarshalXML.
+func (t *PutObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type T PutObject
+ var overlay struct {
+ *T
+ Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
+ }
+ overlay.T = (*T)(t)
+ overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
+ return d.DecodeElement(&overlay, &start)
+}
+
// PutObjectInline models the SOAP PutObjectInline request: like PutObject
// but carrying the object bytes inline, base64-encoded, in the Data element.
type PutObjectInline struct {
	Bucket            string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
	Key               string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
	Metadata          []MetadataEntry   `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"`
	Data              []byte            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
	ContentLength     int64             `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"`
	AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
	StorageClass      StorageClass      `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"`
	AWSAccessKeyId    string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
	Timestamp         time.Time         `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	Signature         string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
	Credential        string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
}

// MarshalXML encodes t, routing Data through xsdBase64Binary and Timestamp
// through xsdDateTime so both use their XSD lexical forms.
func (t *PutObjectInline) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T PutObjectInline // method-free alias: avoids recursing into MarshalXML
	var layout struct {
		*T
		Data      *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
		Timestamp *xsdDateTime     `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	layout.T = (*T)(t)
	layout.Data = (*xsdBase64Binary)(&layout.T.Data)
	layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
	return e.EncodeElement(layout, start)
}

// UnmarshalXML decodes into t, accepting base64 Data and XSD dateTime
// Timestamp values.
func (t *PutObjectInline) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type T PutObjectInline // method-free alias: avoids recursing into UnmarshalXML
	var overlay struct {
		*T
		Data      *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"`
		Timestamp *xsdDateTime     `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	overlay.T = (*T)(t)
	overlay.Data = (*xsdBase64Binary)(&overlay.T.Data)
	overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
	return d.DecodeElement(&overlay, &start)
}
+
// PutObjectInlineResponse wraps the PutObjectResult for the SOAP
// PutObjectInline operation.
type PutObjectInlineResponse struct {
	PutObjectInlineResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectInlineResponse"`
}

// PutObjectResponse wraps the PutObjectResult for the SOAP PutObject
// operation.
type PutObjectResponse struct {
	PutObjectResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectResponse"`
}

// PutObjectResult carries the outcome of a put: the stored object's ETag
// and its LastModified time.
type PutObjectResult struct {
	ETag         string    `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
	LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
}

// MarshalXML encodes t with LastModified in the XSD dateTime layout.
func (t *PutObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T PutObjectResult // method-free alias: avoids recursing into MarshalXML
	var layout struct {
		*T
		LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
	}
	layout.T = (*T)(t)
	layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
	return e.EncodeElement(layout, start)
}

// UnmarshalXML decodes into t, parsing LastModified as XSD dateTime.
func (t *PutObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type T PutObjectResult // method-free alias: avoids recursing into UnmarshalXML
	var overlay struct {
		*T
		LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
	}
	overlay.T = (*T)(t)
	overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
	return d.DecodeElement(&overlay, &start)
}
+
// RequestPaymentConfiguration selects who pays for requests against a
// bucket (see the Payer type).
type RequestPaymentConfiguration struct {
	Payer Payer `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Payer"`
}

// Result is the schema's generic operation outcome, holding a Status
// code/description pair.
type Result struct {
	Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"`
}
+
// SetBucketAccessControlPolicy models the SOAP request that replaces a
// bucket's ACL, with the usual schema auth fields.
type SetBucketAccessControlPolicy struct {
	Bucket            string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
	AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"`
	AWSAccessKeyId    string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
	Timestamp         time.Time         `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	Signature         string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
	Credential        string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
}

// MarshalXML encodes t with Timestamp in the XSD dateTime layout
// (omitted when zero).
func (t *SetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T SetBucketAccessControlPolicy // method-free alias: avoids recursion
	var layout struct {
		*T
		Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	layout.T = (*T)(t)
	layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
	return e.EncodeElement(layout, start)
}

// UnmarshalXML decodes into t, parsing Timestamp as XSD dateTime.
func (t *SetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type T SetBucketAccessControlPolicy // method-free alias: avoids recursion
	var overlay struct {
		*T
		Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	overlay.T = (*T)(t)
	overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
	return d.DecodeElement(&overlay, &start)
}

// SetBucketAccessControlPolicyResponse is the (empty) response body for
// SetBucketAccessControlPolicy.
type SetBucketAccessControlPolicyResponse struct {
}
+
// SetBucketLoggingStatus models the SOAP request that updates a bucket's
// logging configuration.
type SetBucketLoggingStatus struct {
	Bucket              string              `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
	AWSAccessKeyId      string              `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
	Timestamp           time.Time           `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	Signature           string              `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
	Credential          string              `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
	BucketLoggingStatus BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketLoggingStatus"`
}

// MarshalXML encodes t with Timestamp in the XSD dateTime layout
// (omitted when zero).
func (t *SetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T SetBucketLoggingStatus // method-free alias: avoids recursion
	var layout struct {
		*T
		Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	layout.T = (*T)(t)
	layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
	return e.EncodeElement(layout, start)
}

// UnmarshalXML decodes into t, parsing Timestamp as XSD dateTime.
func (t *SetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type T SetBucketLoggingStatus // method-free alias: avoids recursion
	var overlay struct {
		*T
		Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	overlay.T = (*T)(t)
	overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
	return d.DecodeElement(&overlay, &start)
}

// SetBucketLoggingStatusResponse is the (empty) response body for
// SetBucketLoggingStatus.
type SetBucketLoggingStatusResponse struct {
}
+
// SetObjectAccessControlPolicy models the SOAP request that replaces a
// single object's ACL. Unlike the bucket variant, AccessControlList here
// is required (no omitempty).
type SetObjectAccessControlPolicy struct {
	Bucket            string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"`
	Key               string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
	AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
	AWSAccessKeyId    string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"`
	Timestamp         time.Time         `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	Signature         string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"`
	Credential        string            `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"`
}

// MarshalXML encodes t with Timestamp in the XSD dateTime layout
// (omitted when zero).
func (t *SetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T SetObjectAccessControlPolicy // method-free alias: avoids recursion
	var layout struct {
		*T
		Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	layout.T = (*T)(t)
	layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp)
	return e.EncodeElement(layout, start)
}

// UnmarshalXML decodes into t, parsing Timestamp as XSD dateTime.
func (t *SetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type T SetObjectAccessControlPolicy // method-free alias: avoids recursion
	var overlay struct {
		*T
		Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"`
	}
	overlay.T = (*T)(t)
	overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp)
	return d.DecodeElement(&overlay, &start)
}

// SetObjectAccessControlPolicyResponse is the (empty) response body for
// SetObjectAccessControlPolicy.
type SetObjectAccessControlPolicyResponse struct {
}

// Status is a numeric result code plus a human-readable description.
type Status struct {
	Code        int    `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Code"`
	Description string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Description"`
}
+
// StorageClass is the schema's storage-class enumeration.
// May be one of STANDARD, REDUCED_REDUNDANCY, GLACIER, UNKNOWN
type StorageClass string

// TopicConfiguration pairs a notification Topic with the Event names
// routed to it.
type TopicConfiguration struct {
	Topic string   `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Topic"`
	Event []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Event"`
}

// User is an empty placeholder generated for the schema's User type.
type User struct {
}
+
// VersionEntry describes one object version in a list-versions response:
// key/version identity, latest flag, and the usual size/ETag/owner/
// storage-class metadata.
type VersionEntry struct {
	Key          string        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"`
	VersionId    string        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"`
	IsLatest     bool          `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"`
	LastModified time.Time     `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
	ETag         string        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"`
	Size         int64         `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"`
	Owner        CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"`
	StorageClass StorageClass  `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"`
}

// MarshalXML encodes t with LastModified in the XSD dateTime layout.
func (t *VersionEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type T VersionEntry // method-free alias: avoids recursing into MarshalXML
	var layout struct {
		*T
		LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
	}
	layout.T = (*T)(t)
	layout.LastModified = (*xsdDateTime)(&layout.T.LastModified)
	return e.EncodeElement(layout, start)
}

// UnmarshalXML decodes into t, parsing LastModified as XSD dateTime.
func (t *VersionEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	type T VersionEntry // method-free alias: avoids recursing into UnmarshalXML
	var overlay struct {
		*T
		LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"`
	}
	overlay.T = (*T)(t)
	overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified)
	return d.DecodeElement(&overlay, &start)
}

// VersioningConfiguration holds a bucket's versioning state and MFA-delete
// setting.
type VersioningConfiguration struct {
	Status    VersioningStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status,omitempty"`
	MfaDelete MfaDeleteStatus  `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MfaDelete,omitempty"`
}

// VersioningStatus is the versioning-state enumeration.
// May be one of Enabled, Suspended
type VersioningStatus string
+
// xsdBase64Binary maps the XSD base64Binary type onto a byte slice,
// (un)marshaling element text as standard base64.
type xsdBase64Binary []byte

// UnmarshalText decodes standard-alphabet base64 text into b.
func (b *xsdBase64Binary) UnmarshalText(text []byte) (err error) {
	*b, err = base64.StdEncoding.DecodeString(string(text))
	return
}

// MarshalText encodes b as standard-alphabet base64.
// Uses EncodeToString directly instead of the previous buffer +
// base64.NewEncoder + Close sequence, whose Write/Close errors were
// silently ignored; the output is byte-identical.
func (b xsdBase64Binary) MarshalText() ([]byte, error) {
	return []byte(base64.StdEncoding.EncodeToString(b)), nil
}
+
// xsdDateTime adapts time.Time to the XSD dateTime lexical format used by
// this schema; zero-valued timestamps are omitted from XML output.
type xsdDateTime time.Time

// UnmarshalText parses an XSD dateTime, with or without a timezone suffix.
func (t *xsdDateTime) UnmarshalText(text []byte) error {
	return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999")
}

// MarshalText renders the time in XSD dateTime form, without a timezone.
func (t xsdDateTime) MarshalText() ([]byte, error) {
	return []byte(time.Time(t).Format("2006-01-02T15:04:05.999999999")), nil
}

// MarshalXML writes the timestamp as an element, or nothing at all when
// the time is zero, so optional timestamps disappear from the output.
func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if time.Time(t).IsZero() {
		return nil
	}
	text, err := t.MarshalText()
	if err != nil {
		return err
	}
	return e.EncodeElement(text, start)
}

// MarshalXMLAttr renders the timestamp as an attribute, or a zero Attr
// (which the encoder drops) when the time is zero.
func (t xsdDateTime) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
	if time.Time(t).IsZero() {
		return xml.Attr{}, nil
	}
	text, err := t.MarshalText()
	return xml.Attr{Name: name, Value: string(text)}, err
}

// _unmarshalTime parses text using format after trimming surrounding
// whitespace; when the bare layout fails with a parse error it retries
// once with a trailing "Z07:00" so zone-qualified timestamps also parse.
func _unmarshalTime(text []byte, t *time.Time, format string) (err error) {
	trimmed := string(bytes.TrimSpace(text))
	*t, err = time.Parse(format, trimmed)
	if _, isParseErr := err.(*time.ParseError); isParseErr {
		*t, err = time.Parse(format+"Z07:00", trimmed)
	}
	return err
}
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 1ec5439a5..830d8ebe1 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -11,6 +11,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"strconv"
)
@@ -162,7 +163,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
}
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
- err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsDeleteData)
+ err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsRecursive, req.IsDeleteData)
return &filer_pb.DeleteEntryResponse{}, err
}
@@ -211,3 +212,12 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
PublicUrl: assignResult.PublicUrl,
}, err
}
+
+func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {
+
+ for _, master := range fs.option.Masters {
+ _, err = util.Get(fmt.Sprintf("http://%s/col/delete?collection=%s", master, req.Collection))
+ }
+
+ return &filer_pb.DeleteCollectionResponse{}, err
+}
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 601790f8a..61ca972cc 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -1,7 +1,6 @@
package weed_server
import (
- "net/http"
"github.com/chrislusf/seaweedfs/weed/filer2"
_ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
@@ -9,8 +8,9 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
- "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "net/http"
)
type FilerOption struct {
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index dbd91c5e0..77374147a 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -10,10 +10,10 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/util"
- "strconv"
- "mime/multipart"
"mime"
+ "mime/multipart"
"path"
+ "strconv"
)
func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index ba7c17b79..52be6735c 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -15,6 +15,12 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "os"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
)
type FilerPostResult struct {
@@ -27,9 +33,20 @@ type FilerPostResult struct {
func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Request, path string) (fileId, urlLocation string, err error) {
var entry *filer2.Entry
- if entry, err = fs.filer.FindEntry(filer2.FullPath(path)); err != nil {
+ entry, err = fs.filer.FindEntry(filer2.FullPath(path))
+ if err == filer2.ErrNotFound {
+ return "", "", nil
+ }
+
+ if err != nil {
glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
+ return
+ }
+
+ if len(entry.Chunks) == 0 {
+ glog.V(1).Infof("empty entry: %s", path)
+ w.WriteHeader(http.StatusNoContent)
} else {
fileId = entry.Chunks[0].FileId
urlLocation, err = operation.LookupFileId(fs.filer.GetMaster(), fileId)
@@ -59,9 +76,10 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
DataCenter: "",
}
}
+
assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest)
if ae != nil {
- glog.V(0).Infoln("failing to assign a file id", ae.Error())
+ glog.Errorf("failing to assign a file id: %v", ae)
writeJsonError(w, r, http.StatusInternalServerError, ae)
err = ae
return
@@ -91,21 +109,16 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
return
}
- var fileId, urlLocation string
- var err error
-
- if strings.HasPrefix(r.Header.Get("Content-Type"), "multipart/form-data; boundary=") {
- fileId, urlLocation, err = fs.multipartUploadAnalyzer(w, r, replication, collection, dataCenter)
- if err != nil {
- return
- }
- } else {
- fileId, urlLocation, err = fs.monolithicUploadAnalyzer(w, r, replication, collection, dataCenter)
- if err != nil {
- return
- }
+ fileId, urlLocation, err := fs.queryFileInfoByPath(w, r, r.URL.Path)
+ if err == nil && fileId == "" {
+ fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+ }
+ if err != nil || fileId == "" || urlLocation == "" {
+ return
}
+ glog.V(0).Infof("request header %+v, urlLocation: %v", r.Header, urlLocation)
+
u, _ := url.Parse(urlLocation)
// This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off
@@ -118,6 +131,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
glog.V(4).Infoln("post to", u)
+ // send request to volume server
request := &http.Request{
Method: r.Method,
URL: u,
@@ -131,7 +145,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
resp, do_err := util.Do(request)
if do_err != nil {
- glog.V(0).Infoln("failing to connect to volume server", r.RequestURI, do_err.Error())
+ glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, do_err, r.Method)
writeJsonError(w, r, http.StatusInternalServerError, do_err)
return
}
@@ -155,6 +169,8 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
writeJsonError(w, r, http.StatusInternalServerError, errors.New(ret.Error))
return
}
+
+ // find correct final path
path := r.URL.Path
if strings.HasSuffix(path, "/") {
if ret.Name != "" {
@@ -168,16 +184,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
}
- // also delete the old fid unless PUT operation
- if r.Method != "PUT" {
- if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
- oldFid := entry.Chunks[0].FileId
- operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid))
- } else if err != nil && err != filer2.ErrNotFound {
- glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
- }
- }
-
+ // update metadata in filer store
glog.V(4).Infoln("saving", path, "=>", fileId)
entry := &filer2.Entry{
FullPath: filer2.FullPath(path),
@@ -185,13 +192,15 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,
+ Uid: OS_UID,
+ Gid: OS_GID,
Replication: replication,
Collection: collection,
TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
},
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
- Size: uint64(r.ContentLength),
+ Size: uint64(ret.Size),
Mtime: time.Now().UnixNano(),
}},
}
@@ -202,6 +211,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
return
}
+ // send back post result
reply := FilerPostResult{
Name: ret.Name,
Size: ret.Size,
@@ -215,12 +225,12 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
// curl -X DELETE http://localhost:8888/path/to
func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
- err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), true)
+ err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), false, true)
if err != nil {
- glog.V(4).Infoln("deleting", r.URL.Path, ":", err.Error())
+ glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
- writeJsonQuiet(w, r, http.StatusAccepted, map[string]string{"error": ""})
+ w.WriteHeader(http.StatusNoContent)
}
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index f87e7d65a..4b1745aaa 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -7,6 +7,7 @@ import (
"net/http"
"path"
"strconv"
+ "strings"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
@@ -143,15 +144,9 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
}
path := r.URL.Path
- // also delete the old fid unless PUT operation
- if r.Method != "PUT" {
- if entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil {
- for _, chunk := range entry.Chunks {
- oldFid := chunk.FileId
- operation.DeleteFile(fs.filer.GetMaster(), oldFid, fs.jwt(oldFid))
- }
- } else if err != nil {
- glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
+ if strings.HasSuffix(path, "/") {
+ if fileName != "" {
+ path += fileName
}
}
@@ -162,6 +157,8 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,
+ Uid: OS_UID,
+ Gid: OS_GID,
Replication: replication,
Collection: collection,
TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
diff --git a/weed/server/filer_server_handlers_write_monopart.go b/weed/server/filer_server_handlers_write_monopart.go
deleted file mode 100644
index 777d5bc43..000000000
--- a/weed/server/filer_server_handlers_write_monopart.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package weed_server
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
-)
-
-var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
-
-func escapeQuotes(s string) string {
- return quoteEscaper.Replace(s)
-}
-
-func createFormFile(writer *multipart.Writer, fieldname, filename, mime string) (io.Writer, error) {
- h := make(textproto.MIMEHeader)
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
- escapeQuotes(fieldname), escapeQuotes(filename)))
- if len(mime) == 0 {
- mime = "application/octet-stream"
- }
- h.Set("Content-Type", mime)
- return writer.CreatePart(h)
-}
-
-func makeFormData(filename, mimeType string, content io.Reader) (formData io.Reader, contentType string, err error) {
- buf := new(bytes.Buffer)
- writer := multipart.NewWriter(buf)
- defer writer.Close()
-
- part, err := createFormFile(writer, "file", filename, mimeType)
- if err != nil {
- glog.V(0).Infoln(err)
- return
- }
- _, err = io.Copy(part, content)
- if err != nil {
- glog.V(0).Infoln(err)
- return
- }
-
- formData = buf
- contentType = writer.FormDataContentType()
-
- return
-}
-
-func checkContentMD5(w http.ResponseWriter, r *http.Request) (err error) {
- if contentMD5 := r.Header.Get("Content-MD5"); contentMD5 != "" {
- buf, _ := ioutil.ReadAll(r.Body)
- //checkMD5
- sum := md5.Sum(buf)
- fileDataMD5 := base64.StdEncoding.EncodeToString(sum[0:len(sum)])
- if strings.ToLower(fileDataMD5) != strings.ToLower(contentMD5) {
- glog.V(0).Infof("fileDataMD5 [%s] is not equal to Content-MD5 [%s]", fileDataMD5, contentMD5)
- err = fmt.Errorf("MD5 check failed")
- writeJsonError(w, r, http.StatusNotAcceptable, err)
- return
- }
- //reconstruct http request body for following new request to volume server
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
- }
- return
-}
-
-func (fs *FilerServer) monolithicUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) {
- /*
- Amazon S3 ref link:[http://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html]
- There is a long way to provide a completely compatibility against all Amazon S3 API, I just made
- a simple data stream adapter between S3 PUT API and seaweedfs's volume storage Write API
- 1. The request url format should be http://$host:$port/$bucketName/$objectName
- 2. bucketName will be mapped to seaweedfs's collection name
- 3. You could customize and make your enhancement.
- */
- lastPos := strings.LastIndex(r.URL.Path, "/")
- if lastPos == -1 || lastPos == 0 || lastPos == len(r.URL.Path)-1 {
- glog.V(0).Infof("URL Path [%s] is invalid, could not retrieve file name", r.URL.Path)
- err = fmt.Errorf("URL Path is invalid")
- writeJsonError(w, r, http.StatusInternalServerError, err)
- return
- }
-
- if err = checkContentMD5(w, r); err != nil {
- return
- }
-
- fileName := r.URL.Path[lastPos+1:]
- if err = multipartHttpBodyBuilder(w, r, fileName); err != nil {
- return
- }
-
- secondPos := strings.Index(r.URL.Path[1:], "/") + 1
- collection = r.URL.Path[1:secondPos]
- path := r.URL.Path
-
- if fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path); err == nil && fileId == "" {
- fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
- }
- return
-}
-
-func multipartHttpBodyBuilder(w http.ResponseWriter, r *http.Request, fileName string) (err error) {
- body, contentType, te := makeFormData(fileName, r.Header.Get("Content-Type"), r.Body)
- if te != nil {
- glog.V(0).Infoln("S3 protocol to raw seaweed protocol failed", te.Error())
- writeJsonError(w, r, http.StatusInternalServerError, te)
- err = te
- return
- }
-
- if body != nil {
- switch v := body.(type) {
- case *bytes.Buffer:
- r.ContentLength = int64(v.Len())
- case *bytes.Reader:
- r.ContentLength = int64(v.Len())
- case *strings.Reader:
- r.ContentLength = int64(v.Len())
- }
- }
-
- r.Header.Set("Content-Type", contentType)
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- r.Body = rc
- return
-}
diff --git a/weed/server/filer_server_handlers_write_multipart.go b/weed/server/filer_server_handlers_write_multipart.go
deleted file mode 100644
index 056317750..000000000
--- a/weed/server/filer_server_handlers_write_multipart.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package weed_server
-
-import (
- "bytes"
- "io/ioutil"
- "net/http"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage"
-)
-
-func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) {
- //Default handle way for http multipart
- if r.Method == "PUT" {
- buf, _ := ioutil.ReadAll(r.Body)
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
- fileName, _, _, _, _, _, _, _, pe := storage.ParseUpload(r)
- if pe != nil {
- glog.V(0).Infoln("failing to parse post body", pe.Error())
- writeJsonError(w, r, http.StatusInternalServerError, pe)
- err = pe
- return
- }
- //reconstruct http request body for following new request to volume server
- r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
-
- path := r.URL.Path
- if strings.HasSuffix(path, "/") {
- if fileName != "" {
- path += fileName
- }
- }
- fileId, urlLocation, err = fs.queryFileInfoByPath(w, r, path)
- } else {
- fileId, urlLocation, err = fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
- }
- return
-}
diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go
index c91ab0407..627fe354e 100644
--- a/weed/server/raft_server_handlers.go
+++ b/weed/server/raft_server_handlers.go
@@ -1,8 +1,8 @@
package weed_server
import (
- "net/http"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "net/http"
)
func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
diff --git a/weed/server/volume_grpc_client.go b/weed/server/volume_grpc_client.go
index b3c755239..de6fa23c7 100644
--- a/weed/server/volume_grpc_client.go
+++ b/weed/server/volume_grpc_client.go
@@ -7,8 +7,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "golang.org/x/net/context"
"github.com/chrislusf/seaweedfs/weed/util"
+ "golang.org/x/net/context"
)
func (vs *VolumeServer) GetMaster() string {
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 9294f9bf6..037fca2c2 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -1,10 +1,10 @@
package weed_server
import (
- "net/http"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage"
+ "net/http"
)
type VolumeServer struct {
diff --git a/weed/server/volume_server_handlers_sync.go b/weed/server/volume_server_handlers_sync.go
index 38adfe870..c6e32bb9b 100644
--- a/weed/server/volume_server_handlers_sync.go
+++ b/weed/server/volume_server_handlers_sync.go
@@ -6,8 +6,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
- "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func (vs *VolumeServer) getVolumeSyncStatusHandler(w http.ResponseWriter, r *http.Request) {
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index 55ef2a613..d32958339 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -9,8 +9,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/topology"
- "time"
"strconv"
+ "time"
)
func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
@@ -55,7 +55,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
volumeId, _ := storage.NewVolumeId(vid)
n.ParsePath(fid)
- glog.V(2).Infoln("deleting", n)
+ glog.V(2).Infof("volume %s deleting %s", vid, n)
cookie := n.Cookie
diff --git a/weed/storage/file_id.go b/weed/storage/file_id.go
index 0871bfb25..37dcb7c70 100644
--- a/weed/storage/file_id.go
+++ b/weed/storage/file_id.go
@@ -20,11 +20,15 @@ func NewFileId(VolumeId VolumeId, key uint64, cookie uint32) *FileId {
}
func (n *FileId) String() string {
+ return n.VolumeId.String() + "," + formatNeedleIdCookie(n.Key, n.Cookie)
+}
+
+func formatNeedleIdCookie(key NeedleId, cookie Cookie) string {
bytes := make([]byte, NeedleIdSize+CookieSize)
- NeedleIdToBytes(bytes[0:NeedleIdSize], n.Key)
- CookieToBytes(bytes[NeedleIdSize:NeedleIdSize+CookieSize], n.Cookie)
+ NeedleIdToBytes(bytes[0:NeedleIdSize], key)
+ CookieToBytes(bytes[NeedleIdSize:NeedleIdSize+CookieSize], cookie)
nonzero_index := 0
for ; bytes[nonzero_index] == 0; nonzero_index++ {
}
- return n.VolumeId.String() + "," + hex.EncodeToString(bytes[nonzero_index:])
+ return hex.EncodeToString(bytes[nonzero_index:])
}
diff --git a/weed/storage/needle.go b/weed/storage/needle.go
index 46ba933ca..31bada091 100644
--- a/weed/storage/needle.go
+++ b/weed/storage/needle.go
@@ -29,12 +29,12 @@ type Needle struct {
DataSize uint32 `comment:"Data size"` //version2
Data []byte `comment:"The actual file data"`
- Flags byte `comment:"boolean flags"` //version2
- NameSize uint8 //version2
+ Flags byte `comment:"boolean flags"` //version2
+ NameSize uint8 //version2
Name []byte `comment:"maximum 256 characters"` //version2
- MimeSize uint8 //version2
+ MimeSize uint8 //version2
Mime []byte `comment:"maximum 256 characters"` //version2
- PairsSize uint16 //version2
+ PairsSize uint16 //version2
Pairs []byte `comment:"additional name value pairs, json format, maximum 64kB"`
LastModified uint64 //only store LastModifiedBytesLength bytes, which is 5 bytes to disk
Ttl *TTL
@@ -44,7 +44,7 @@ type Needle struct {
}
func (n *Needle) String() (str string) {
- str = fmt.Sprintf("Cookie:%d, Id:%d, Size:%d, DataSize:%d, Name: %s, Mime: %s", n.Cookie, n.Id, n.Size, n.DataSize, n.Name, n.Mime)
+ str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime)
return
}
@@ -134,7 +134,7 @@ func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) {
dotSep := strings.LastIndex(r.URL.Path, ".")
fid := r.URL.Path[commaSep+1:]
if dotSep > 0 {
- fid = r.URL.Path[commaSep+1: dotSep]
+ fid = r.URL.Path[commaSep+1 : dotSep]
}
e = n.ParsePath(fid)
diff --git a/weed/storage/needle/btree_map.go b/weed/storage/needle/btree_map.go
index a7c9982ac..d688b802e 100644
--- a/weed/storage/needle/btree_map.go
+++ b/weed/storage/needle/btree_map.go
@@ -1,8 +1,8 @@
package needle
import (
- "github.com/google/btree"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/google/btree"
)
//This map assumes mostly inserting increasing keys
diff --git a/weed/storage/needle/compact_map.go b/weed/storage/needle/compact_map.go
index 7b653d838..9852dca74 100644
--- a/weed/storage/needle/compact_map.go
+++ b/weed/storage/needle/compact_map.go
@@ -1,8 +1,8 @@
package needle
import (
- "sync"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "sync"
)
type CompactSection struct {
diff --git a/weed/storage/needle/compact_map_perf_test.go b/weed/storage/needle/compact_map_perf_test.go
index 7908e8d1d..a66836ac8 100644
--- a/weed/storage/needle/compact_map_perf_test.go
+++ b/weed/storage/needle/compact_map_perf_test.go
@@ -6,8 +6,8 @@ import (
"testing"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestMemoryUsage(t *testing.T) {
@@ -30,9 +30,9 @@ func loadNewNeedleMap(file *os.File) {
}
for count > 0 && e == nil {
for i := 0; i < count; i += 16 {
- key := BytesToNeedleId(bytes[i:i+NeedleIdSize])
- offset := BytesToOffset(bytes[i+NeedleIdSize:i+NeedleIdSize+OffsetSize])
- size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize:i+NeedleIdSize+OffsetSize+SizeSize])
+ key := BytesToNeedleId(bytes[i : i+NeedleIdSize])
+ offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize])
+ size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize])
if offset > 0 {
m.Set(NeedleId(key), offset, size)
diff --git a/weed/storage/needle/compact_map_test.go b/weed/storage/needle/compact_map_test.go
index 11d98eab3..b4cbb446a 100644
--- a/weed/storage/needle/compact_map_test.go
+++ b/weed/storage/needle/compact_map_test.go
@@ -1,8 +1,8 @@
package needle
import (
- "testing"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "testing"
)
func TestIssue52(t *testing.T) {
diff --git a/weed/storage/needle/needle_value.go b/weed/storage/needle/needle_value.go
index 8fd7b1b1c..b15d25245 100644
--- a/weed/storage/needle/needle_value.go
+++ b/weed/storage/needle/needle_value.go
@@ -1,8 +1,8 @@
package needle
import (
- "github.com/google/btree"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/google/btree"
)
const (
diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go
index ce2079e89..6d815679b 100644
--- a/weed/storage/needle_map.go
+++ b/weed/storage/needle_map.go
@@ -57,8 +57,8 @@ func (nm *baseNeedleMapper) IndexFileName() string {
func IdxFileEntry(bytes []byte) (key NeedleId, offset Offset, size uint32) {
key = BytesToNeedleId(bytes[:NeedleIdSize])
- offset = BytesToOffset(bytes[NeedleIdSize:NeedleIdSize+OffsetSize])
- size = util.BytesToUint32(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize])
+ offset = BytesToOffset(bytes[NeedleIdSize : NeedleIdSize+OffsetSize])
+ size = util.BytesToUint32(bytes[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
return
}
func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size uint32) error {
diff --git a/weed/storage/needle_map_boltdb.go b/weed/storage/needle_map_boltdb.go
index d5062a1b7..fd335fd00 100644
--- a/weed/storage/needle_map_boltdb.go
+++ b/weed/storage/needle_map_boltdb.go
@@ -6,11 +6,11 @@ import (
"github.com/boltdb/bolt"
+ "errors"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
- "errors"
)
type BoltDbNeedleMap struct {
@@ -101,7 +101,7 @@ func (m *BoltDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok boo
}
offset = BytesToOffset(data[0:OffsetSize])
- size = util.BytesToUint32(data[OffsetSize:OffsetSize+SizeSize])
+ size = util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
return nil
})
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index 1af88e545..1580bb005 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -81,7 +81,7 @@ func (m *LevelDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bo
return nil, false
}
offset := BytesToOffset(data[0:OffsetSize])
- size := util.BytesToUint32(data[OffsetSize:OffsetSize+SizeSize])
+ size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize])
return &needle.NeedleValue{Key: NeedleId(key), Offset: offset, Size: size}, true
}
diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go
index 690ddd737..fa5576c2b 100644
--- a/weed/storage/needle_map_memory.go
+++ b/weed/storage/needle_map_memory.go
@@ -88,7 +88,7 @@ func WalkIndexFile(r *os.File, fn func(key NeedleId, offset Offset, size uint32)
for count > 0 && e == nil || e == io.EOF {
for i = 0; i+NeedleEntrySize <= count; i += NeedleEntrySize {
- key, offset, size = IdxFileEntry(bytes[i: i+NeedleEntrySize])
+ key, offset, size = IdxFileEntry(bytes[i : i+NeedleEntrySize])
if e = fn(key, offset, size); e != nil {
return e
}
diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go
index 793a9ea10..3bcb140f1 100644
--- a/weed/storage/needle_map_metric.go
+++ b/weed/storage/needle_map_metric.go
@@ -2,10 +2,10 @@ package storage
import (
"fmt"
- "os"
- "github.com/willf/bloom"
"github.com/chrislusf/seaweedfs/weed/glog"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/willf/bloom"
+ "os"
)
type mapMetric struct {
diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go
index 400e655a7..539f83a87 100644
--- a/weed/storage/needle_map_metric_test.go
+++ b/weed/storage/needle_map_metric_test.go
@@ -1,11 +1,11 @@
package storage
import (
- "testing"
- "io/ioutil"
- "math/rand"
"github.com/chrislusf/seaweedfs/weed/glog"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "io/ioutil"
+ "math/rand"
+ "testing"
)
func TestFastLoadingNeedleMapMetrics(t *testing.T) {
diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go
index 112ec32d4..af12b994d 100644
--- a/weed/storage/needle_parse_multipart.go
+++ b/weed/storage/needle_parse_multipart.go
@@ -1,12 +1,12 @@
package storage
import (
- "mime"
- "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "io/ioutil"
+ "mime"
"net/http"
"path"
- "io/ioutil"
"strings"
)
diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go
index a62539afc..bfd325475 100644
--- a/weed/storage/needle_read_write.go
+++ b/weed/storage/needle_read_write.go
@@ -6,8 +6,8 @@ import (
"io"
"os"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -59,7 +59,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
actualSize = NeedleEntrySize + int64(n.Size)
padding := NeedlePaddingSize - ((NeedleEntrySize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
- _, err = w.Write(header[0: NeedleChecksumSize+padding])
+ _, err = w.Write(header[0 : NeedleChecksumSize+padding])
return
case Version2:
header := make([]byte, NeedleEntrySize)
@@ -123,7 +123,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
}
if n.HasLastModifiedDate() {
util.Uint64toBytes(header[0:8], n.LastModified)
- if _, err = w.Write(header[8-LastModifiedBytesLength: 8]); err != nil {
+ if _, err = w.Write(header[8-LastModifiedBytesLength : 8]); err != nil {
return
}
}
@@ -145,7 +145,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
}
padding := NeedlePaddingSize - ((NeedleEntrySize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
- _, err = w.Write(header[0: NeedleChecksumSize+padding])
+ _, err = w.Write(header[0 : NeedleChecksumSize+padding])
return n.DataSize, getActualSize(n.Size), err
}
@@ -169,14 +169,14 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
}
switch version {
case Version1:
- n.Data = bytes[NeedleEntrySize: NeedleEntrySize+size]
+ n.Data = bytes[NeedleEntrySize : NeedleEntrySize+size]
case Version2:
- n.readNeedleDataVersion2(bytes[NeedleEntrySize: NeedleEntrySize+int(n.Size)])
+ n.readNeedleDataVersion2(bytes[NeedleEntrySize : NeedleEntrySize+int(n.Size)])
}
if size == 0 {
return nil
}
- checksum := util.BytesToUint32(bytes[NeedleEntrySize+size: NeedleEntrySize+size+NeedleChecksumSize])
+ checksum := util.BytesToUint32(bytes[NeedleEntrySize+size : NeedleEntrySize+size+NeedleChecksumSize])
newChecksum := NewCRC(n.Data)
if checksum != newChecksum.Value() {
return errors.New("CRC error! Data On Disk Corrupted")
@@ -187,21 +187,21 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
func (n *Needle) ParseNeedleHeader(bytes []byte) {
n.Cookie = BytesToCookie(bytes[0:CookieSize])
- n.Id = BytesToNeedleId(bytes[CookieSize:CookieSize+NeedleIdSize])
- n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize:NeedleEntrySize])
+ n.Id = BytesToNeedleId(bytes[CookieSize : CookieSize+NeedleIdSize])
+ n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleEntrySize])
}
func (n *Needle) readNeedleDataVersion2(bytes []byte) {
index, lenBytes := 0, len(bytes)
if index < lenBytes {
- n.DataSize = util.BytesToUint32(bytes[index: index+4])
+ n.DataSize = util.BytesToUint32(bytes[index : index+4])
index = index + 4
if int(n.DataSize)+index > lenBytes {
// this if clause is due to bug #87 and #93, fixed in v0.69
// remove this clause later
return
}
- n.Data = bytes[index: index+int(n.DataSize)]
+ n.Data = bytes[index : index+int(n.DataSize)]
index = index + int(n.DataSize)
n.Flags = bytes[index]
index = index + 1
@@ -209,25 +209,25 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) {
if index < lenBytes && n.HasName() {
n.NameSize = uint8(bytes[index])
index = index + 1
- n.Name = bytes[index: index+int(n.NameSize)]
+ n.Name = bytes[index : index+int(n.NameSize)]
index = index + int(n.NameSize)
}
if index < lenBytes && n.HasMime() {
n.MimeSize = uint8(bytes[index])
index = index + 1
- n.Mime = bytes[index: index+int(n.MimeSize)]
+ n.Mime = bytes[index : index+int(n.MimeSize)]
index = index + int(n.MimeSize)
}
if index < lenBytes && n.HasLastModifiedDate() {
- n.LastModified = util.BytesToUint64(bytes[index: index+LastModifiedBytesLength])
+ n.LastModified = util.BytesToUint64(bytes[index : index+LastModifiedBytesLength])
index = index + LastModifiedBytesLength
}
if index < lenBytes && n.HasTtl() {
- n.Ttl = LoadTTLFromBytes(bytes[index: index+TtlBytesLength])
+ n.Ttl = LoadTTLFromBytes(bytes[index : index+TtlBytesLength])
index = index + TtlBytesLength
}
if index < lenBytes && n.HasPairs() {
- n.PairsSize = util.BytesToUint16(bytes[index: index+2])
+ n.PairsSize = util.BytesToUint16(bytes[index : index+2])
index += 2
end := index + int(n.PairsSize)
n.Pairs = bytes[index:end]
diff --git a/weed/storage/needle_test.go b/weed/storage/needle_test.go
index 4dce3f123..65036409c 100644
--- a/weed/storage/needle_test.go
+++ b/weed/storage/needle_test.go
@@ -1,8 +1,8 @@
package storage
import (
- "testing"
"github.com/chrislusf/seaweedfs/weed/storage/types"
+ "testing"
)
func TestParseKeyHash(t *testing.T) {
diff --git a/weed/storage/store.go b/weed/storage/store.go
index ef055ee59..ac125ef4b 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -5,9 +5,9 @@ import (
"strconv"
"strings"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
)
const (
diff --git a/weed/storage/types/needle_id_type.go b/weed/storage/types/needle_id_type.go
index d9e7074b3..cb19a6f45 100644
--- a/weed/storage/types/needle_id_type.go
+++ b/weed/storage/types/needle_id_type.go
@@ -1,9 +1,9 @@
package types
import (
+ "fmt"
"github.com/chrislusf/seaweedfs/weed/util"
"strconv"
- "fmt"
)
type NeedleId uint64
@@ -21,11 +21,11 @@ func NeedleIdToUint64(needleId NeedleId) uint64 {
return uint64(needleId)
}
-func Uint64ToNeedleId(needleId uint64) (NeedleId) {
+func Uint64ToNeedleId(needleId uint64) NeedleId {
return NeedleId(needleId)
}
-func BytesToNeedleId(bytes []byte) (NeedleId) {
+func BytesToNeedleId(bytes []byte) NeedleId {
return NeedleId(util.BytesToUint64(bytes))
}
diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go
index 09ff727e6..8a2054fc5 100644
--- a/weed/storage/types/needle_types.go
+++ b/weed/storage/types/needle_types.go
@@ -1,10 +1,10 @@
package types
import (
- "math"
+ "fmt"
"github.com/chrislusf/seaweedfs/weed/util"
+ "math"
"strconv"
- "fmt"
)
type Offset uint32
@@ -23,11 +23,11 @@ const (
func CookieToBytes(bytes []byte, cookie Cookie) {
util.Uint32toBytes(bytes, uint32(cookie))
}
-func Uint32ToCookie(cookie uint32) (Cookie) {
+func Uint32ToCookie(cookie uint32) Cookie {
return Cookie(cookie)
}
-func BytesToCookie(bytes []byte) (Cookie) {
+func BytesToCookie(bytes []byte) Cookie {
return Cookie(util.BytesToUint32(bytes[0:4]))
}
@@ -43,10 +43,10 @@ func OffsetToBytes(bytes []byte, offset Offset) {
util.Uint32toBytes(bytes, uint32(offset))
}
-func Uint32ToOffset(offset uint32) (Offset) {
+func Uint32ToOffset(offset uint32) Offset {
return Offset(offset)
}
-func BytesToOffset(bytes []byte) (Offset) {
+func BytesToOffset(bytes []byte) Offset {
return Offset(util.BytesToUint32(bytes[0:4]))
}
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index 34b4cfe0d..58ecc73cb 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -5,8 +5,8 @@ import (
"os"
"time"
- . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/glog"
+ . "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 896540621..464d52618 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -1,11 +1,11 @@
package storage
import (
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
"io/ioutil"
"math/rand"
"os"
"testing"
- "github.com/chrislusf/seaweedfs/weed/storage/types"
)
/*