-rw-r--r--  go.mod                                           6
-rw-r--r--  go.sum                                           2
-rw-r--r--  test/s3/basic/basic_test.go                    106
-rw-r--r--  weed/command/mount.go                            2
-rw-r--r--  weed/command/mount_std.go                        1
-rw-r--r--  weed/command/s3.go                               1
-rw-r--r--  weed/filesys/dir.go                              3
-rw-r--r--  weed/filesys/wfs.go                              1
-rw-r--r--  weed/s3api/auth_credentials.go                   6
-rw-r--r--  weed/s3api/s3api_bucket_handlers.go             10
-rw-r--r--  weed/s3api/s3api_object_copy_handlers.go        14
-rw-r--r--  weed/s3api/s3api_object_handlers.go             58
-rw-r--r--  weed/s3api/s3api_object_multipart_handlers.go   36
-rw-r--r--  weed/s3api/s3api_objects_list_handlers.go       10
-rw-r--r--  weed/s3api/s3api_server.go                       7
-rw-r--r--  weed/server/common.go                            2
-rw-r--r--  weed/server/filer_server_handlers_read.go        6
-rw-r--r--  weed/server/volume_server_handlers_read.go       4
18 files changed, 178 insertions, 97 deletions
diff --git a/go.mod b/go.mod
index 15000077b..e5fc3bbfd 100644
--- a/go.mod
+++ b/go.mod
@@ -32,7 +32,7 @@ require (
github.com/golang/protobuf v1.4.2
github.com/google/btree v1.0.0
github.com/google/uuid v1.1.1
- github.com/gorilla/mux v1.7.3
+ github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.1 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect
@@ -90,6 +90,4 @@ require (
gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
)
-replace (
- go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
-)
+replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
diff --git a/go.sum b/go.sum
index ec8fb2805..28461324e 100644
--- a/go.sum
+++ b/go.sum
@@ -236,6 +236,8 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
diff --git a/test/s3/basic/basic_test.go b/test/s3/basic/basic_test.go
index 72d55939b..1f9e74fc1 100644
--- a/test/s3/basic/basic_test.go
+++ b/test/s3/basic/basic_test.go
@@ -2,14 +2,14 @@ package basic
import (
"fmt"
- "os"
- "strings"
- "testing"
-
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
)
var (
@@ -109,3 +109,101 @@ func exitErrorf(msg string, args ...interface{}) {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
os.Exit(1)
}
+
+const (
+ Bucket = "theBucket"
+ object = "foo/bar"
+ Data = "<data>"
+)
+
+func TestObjectOp(t *testing.T) {
+ _, err := svc.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String(Bucket),
+ })
+ if err != nil {
+ exitErrorf("Unable to create bucket, %v", err)
+ }
+
+ _, err = svc.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(object),
+ Body: strings.NewReader(Data),
+ })
+ if err != nil {
+ exitErrorf("Unable to put object, %v", err)
+ }
+
+ dest := fmt.Sprintf("%s_bak", object)
+ copyObj, err := svc.CopyObject(&s3.CopyObjectInput{
+ Bucket: aws.String(Bucket),
+ CopySource: aws.String(fmt.Sprintf("%s/%s", Bucket, object)),
+ Key: aws.String(dest),
+ })
+ if err != nil {
+ exitErrorf("Unable to copy object, %v", err)
+ }
+ t.Log("copy object result -> ", copyObj.CopyObjectResult)
+
+ getObj, err := svc.GetObject(&s3.GetObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(dest),
+ })
+ if err != nil {
+ exitErrorf("Unable to get copy object, %v", err)
+ }
+
+ data, err := ioutil.ReadAll(getObj.Body)
+ if err != nil {
+ exitErrorf("Unable to read object data, %v", err)
+ }
+ if string(data) != Data {
+ t.Error("object data -> ", string(data))
+ }
+
+ listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
+ Bucket: aws.String(Bucket),
+ Prefix: aws.String("foo/"),
+ })
+ if err != nil {
+ exitErrorf("Unable to list objects, %v", err)
+ }
+ count := 0
+ for _, content := range listObj.Contents {
+ key := aws.StringValue(content.Key)
+ if key == dest {
+ count++
+ } else if key == object {
+ count++
+ }
+ if count == 2 {
+ break
+ }
+ }
+ if count != 2 {
+ exitErrorf("Unable to find two objects, %v", listObj.Contents)
+ }
+
+ _, err = svc.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(object),
+ })
+ if err != nil {
+ exitErrorf("Unable to delete source object, %v", err)
+ }
+
+ _, err = svc.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(Bucket),
+ Key: aws.String(dest),
+ })
+ if err != nil {
+ exitErrorf("Unable to delete object, %v", err)
+ }
+
+ _, err = svc.DeleteBucket(&s3.DeleteBucketInput{
+ Bucket: aws.String(Bucket),
+ })
+
+ if err != nil {
+ exitErrorf("Unable to delete bucket, %v", err)
+ }
+}
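
Note: the test above drives a package-level svc client that is configured outside this hunk. A minimal sketch of how such a client might be pointed at a locally running weed s3 gateway; the endpoint, region, and credentials below are assumptions for illustration, not part of this commit:

// sketch only: wire the AWS SDK (github.com/aws/aws-sdk-go) to a local S3-compatible endpoint
sess, err := session.NewSession(&aws.Config{
	Region:           aws.String("us-east-1"),                            // assumed; any value works locally
	Endpoint:         aws.String("http://localhost:8333"),                // assumed local "weed s3" address
	Credentials:      credentials.NewStaticCredentials("any", "any", ""), // assumed: no IAM config loaded
	S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
	exitErrorf("create session, %v", err)
}
svc = s3.New(sess)

(credentials here refers to github.com/aws/aws-sdk-go/aws/credentials.)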
diff --git a/weed/command/mount.go b/weed/command/mount.go
index 440aca8c6..a0e573423 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -9,7 +9,6 @@ type MountOptions struct {
filerMountRootPath *string
dir *string
dirAutoCreate *bool
- dirListCacheLimit *int64
collection *string
replication *string
ttlSec *int
@@ -35,7 +34,6 @@ func init() {
mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
- mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 56df740c4..3975575e9 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -165,7 +165,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
CacheDir: *option.cacheDir,
CacheSizeMB: *option.cacheSizeMB,
DataCenter: *option.dataCenter,
- DirListCacheLimit: *option.dirListCacheLimit,
EntryCacheTtl: 3 * time.Second,
MountUid: uid,
MountGid: gid,
diff --git a/weed/command/s3.go b/weed/command/s3.go
index 7ebd4fab0..92f13673c 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -151,6 +151,7 @@ func (s3opt *S3Options) startS3Server() bool {
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
Filer: *s3opt.filer,
+ Port: *s3opt.port,
FilerGrpcAddress: filerGrpcAddress,
Config: *s3opt.config,
DomainName: *s3opt.domainName,
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index 77d01d463..2214b1ac7 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -3,6 +3,7 @@ package filesys
import (
"bytes"
"context"
+ "math"
"os"
"strings"
"time"
@@ -277,7 +278,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
dirPath := util.FullPath(dir.FullPath())
meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
- listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(dir.wfs.option.DirListCacheLimit))
+ listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
if listErr != nil {
glog.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index e41693048..68ad987be 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -34,7 +34,6 @@ type Option struct {
CacheDir string
CacheSizeMB int64
DataCenter string
- DirListCacheLimit int64
EntryCacheTtl time.Duration
Umask os.FileMode
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index 30052878b..db5f4c8a3 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -7,7 +7,6 @@ import (
"net/http"
"github.com/golang/protobuf/jsonpb"
- "github.com/gorilla/mux"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
@@ -110,7 +109,7 @@ func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identi
func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
- if iam.isEnabled() {
+ if !iam.isEnabled() {
return f
}
@@ -159,8 +158,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
if !identity.canDo(action, bucket) {
return ErrAccessDenied
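
Note: the one-character change above inverts the guard in Auth; before the fix the wrapper returned the raw handler when IAM was enabled, so configured identities were never checked. A minimal sketch of the intended wrapping, using the authRequest and writeErrorResponse helpers that appear elsewhere in this diff (an illustration of the control flow, not a verbatim copy of the function body):

// sketch: only pass through untouched when IAM is NOT enabled
func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
	if !iam.isEnabled() {
		return f // no identities configured: skip authentication
	}
	return func(w http.ResponseWriter, r *http.Request) {
		errCode := iam.authRequest(r, action)
		if errCode != ErrNone {
			writeErrorResponse(w, errCode, r.URL)
			return
		}
		f(w, r) // authenticated and authorized for this action
	}
}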
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 7d96e3e0e..816db04f9 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -10,7 +10,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/gorilla/mux"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -56,8 +55,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
// create the folder for bucket, but lazily create actual collection
if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
@@ -70,8 +68,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -100,8 +97,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
index b8fb3f6a4..80ca9afcb 100644
--- a/weed/s3api/s3api_object_copy_handlers.go
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -8,16 +8,12 @@ import (
"strings"
"time"
- "github.com/gorilla/mux"
-
"github.com/chrislusf/seaweedfs/weed/util"
)
func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- dstBucket := vars["bucket"]
- dstObject := getObject(vars)
+ dstBucket, dstObject := getBucketAndObject(r)
// Copy source path.
cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
@@ -61,7 +57,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
response := CopyObjectResult{
ETag: etag,
- LastModified: time.Now(),
+ LastModified: time.Now().UTC(),
}
writeSuccessResponseXML(w, encodeResponse(response))
@@ -85,9 +81,7 @@ type CopyPartResult struct {
func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
- vars := mux.Vars(r)
- dstBucket := vars["bucket"]
- // dstObject := getObject(vars)
+ dstBucket, _ := getBucketAndObject(r)
// Copy source path.
cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
@@ -143,7 +137,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
response := CopyPartResult{
ETag: etag,
- LastModified: time.Now(),
+ LastModified: time.Now().UTC(),
}
writeSuccessResponseXML(w, encodeResponse(response))
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 0d287c4ff..9773add81 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -32,9 +32,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
_, err := validateContentMd5(r.Header)
if err != nil {
@@ -45,8 +43,13 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
rAuthType := getRequestAuthType(r)
dataReader := r.Body
var s3ErrCode ErrorCode
- if rAuthType == authTypeStreamingSigned {
+ switch rAuthType {
+ case authTypeStreamingSigned:
dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+ case authTypeSignedV2, authTypePresignedV2:
+ _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
+ case authTypePresigned, authTypeSigned:
+ _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
}
if s3ErrCode != ErrNone {
writeErrorResponse(w, s3ErrCode, r.URL)
@@ -54,25 +57,30 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
}
defer dataReader.Close()
- uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
+ if strings.HasSuffix(object, "/") {
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+ } else {
+ uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
- etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
+ etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
- if errCode != ErrNone {
- writeErrorResponse(w, errCode, r.URL)
- return
- }
+ if errCode != ErrNone {
+ writeErrorResponse(w, errCode, r.URL)
+ return
+ }
- setEtag(w, etag)
+ setEtag(w, etag)
+ }
writeSuccessResponseEmpty(w)
}
func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
if strings.HasSuffix(r.URL.Path, "/") {
writeErrorResponse(w, ErrNotImplemented, r.URL)
@@ -88,9 +96,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
destUrl := fmt.Sprintf("http://%s%s/%s%s",
s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
@@ -101,9 +107,7 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
@@ -117,7 +121,7 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
}
-/// ObjectIdentifier carries key name for the object to delete.
+// / ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
ObjectName string `xml:"Key"`
}
@@ -151,8 +155,7 @@ type DeleteObjectsResponse struct {
// DeleteMultipleObjectsHandler - Delete multiple objects
func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
deleteXMLBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
@@ -305,10 +308,13 @@ func setEtag(w http.ResponseWriter, etag string) {
}
}
-func getObject(vars map[string]string) string {
- object := vars["object"]
+func getBucketAndObject(r *http.Request) (bucket, object string) {
+ vars := mux.Vars(r)
+ bucket = vars["bucket"]
+ object = vars["object"]
if !strings.HasPrefix(object, "/") {
object = "/" + object
}
- return object
+
+ return
}
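
Note: with getBucketAndObject in place, every handler resolves its target the same way from the mux route variables. A minimal usage sketch (the handler name is illustrative, not from this commit):

// sketch: shared lookup of the {bucket}/{object} route variables
func (s3a *S3ApiServer) exampleHandler(w http.ResponseWriter, r *http.Request) {
	bucket, object := getBucketAndObject(r) // e.g. "theBucket", "/foo/bar" (object is always "/"-prefixed)
	destUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
	_ = destUrl // the real handlers above proxy this URL to the filer
}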
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index 3282e4176..0ed96afa2 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -9,7 +9,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
- "github.com/gorilla/mux"
)
const (
@@ -21,10 +20,7 @@ const (
// NewMultipartUploadHandler - New multipart upload.
func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
- var object, bucket string
- vars := mux.Vars(r)
- bucket = vars["bucket"]
- object = vars["object"]
+ bucket, object := getBucketAndObject(r)
response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
@@ -44,9 +40,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
// CompleteMultipartUploadHandler - Completes multipart upload.
func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())
@@ -70,9 +64,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
// AbortMultipartUploadHandler - Aborts multipart upload.
func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())
@@ -96,8 +88,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
// ListMultipartUploadsHandler - Lists multipart uploads.
func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
if maxUploads < 0 {
@@ -135,9 +126,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
// ListObjectPartsHandler - Lists object parts in a multipart upload.
func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
- object := getObject(vars)
+ bucket, object := getBucketAndObject(r)
uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
if partNumberMarker < 0 {
@@ -170,10 +159,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
// PutObjectPartHandler - Put an object part in a multipart upload.
func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- bucket := vars["bucket"]
-
- rAuthType := getRequestAuthType(r)
+ bucket, _ := getBucketAndObject(r)
uploadID := r.URL.Query().Get("uploadId")
exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
@@ -193,10 +179,16 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
return
}
- var s3ErrCode ErrorCode
+ rAuthType := getRequestAuthType(r)
dataReader := r.Body
- if rAuthType == authTypeStreamingSigned {
+ var s3ErrCode ErrorCode
+ switch rAuthType {
+ case authTypeStreamingSigned:
dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
+ case authTypeSignedV2, authTypePresignedV2:
+ _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
+ case authTypePresigned, authTypeSigned:
+ _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
}
if s3ErrCode != ErrNone {
writeErrorResponse(w, s3ErrCode, r.URL)
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index 919e6230a..e06faf213 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -11,8 +11,6 @@ import (
"strings"
"time"
- "github.com/gorilla/mux"
-
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -23,10 +21,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
// collect parameters
- vars := mux.Vars(r)
- bucket := vars["bucket"]
-
- glog.V(4).Infof("read v2: %v", vars)
+ bucket, _ := getBucketAndObject(r)
originalPrefix, marker, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
@@ -58,8 +53,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
// collect parameters
- vars := mux.Vars(r)
- bucket := vars["bucket"]
+ bucket, _ := getBucketAndObject(r)
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index 773094a5f..010958245 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -1,6 +1,7 @@
package s3api
import (
+ "fmt"
"net/http"
"github.com/gorilla/mux"
@@ -9,6 +10,7 @@ import (
type S3ApiServerOption struct {
Filer string
+ Port int
FilerGrpcAddress string
Config string
DomainName string
@@ -37,7 +39,10 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
apiRouter := router.PathPrefix("/").Subrouter()
var routers []*mux.Router
if s3a.option.DomainName != "" {
- routers = append(routers, apiRouter.Host("{bucket:.+}."+s3a.option.DomainName).Subrouter())
+ routers = append(routers, apiRouter.Host(
+ fmt.Sprintf("%s.%s:%d", "{bucket:.+}", s3a.option.DomainName, s3a.option.Port)).Subrouter())
+ routers = append(routers, apiRouter.Host(
+ fmt.Sprintf("%s.%s", "{bucket:.+}", s3a.option.DomainName)).Subrouter())
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
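
Note: the mux upgrade at the top of this diff (1.7.3 to 1.7.4) presumably changes host matching so the port in the Host header is no longer ignored; registering both a port-qualified and a plain virtual-host pattern keeps bucket-in-hostname requests working whether clients hit weed s3 directly or go through a proxy on a standard port. For DomainName "s3.example.com" and port 8333 (example values) the two patterns come out as:

// sketch: host patterns produced by the two fmt.Sprintf calls above (example values)
fmt.Sprintf("%s.%s:%d", "{bucket:.+}", "s3.example.com", 8333) // "{bucket:.+}.s3.example.com:8333"
fmt.Sprintf("%s.%s", "{bucket:.+}", "s3.example.com")          // "{bucket:.+}.s3.example.com"

Path-style requests still fall through to the "/{bucket}" router registered on the last line.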
diff --git a/weed/server/common.go b/weed/server/common.go
index c43b144cb..75fc3ad9e 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -218,7 +218,7 @@ func handleStaticResources2(r *mux.Router) {
r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS)))
}
-func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename string) {
+func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, filename string) {
if filename != "" {
contentDisposition := "inline"
if r.FormValue("dl") != "" {
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 76c924df1..657158c2f 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -101,14 +101,14 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
setEtag(w, etag)
+ filename := entry.Name()
+ adjustHeaderContentDisposition(w, r, filename)
+
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
return
}
- filename := entry.Name()
- adjustHeadersAfterHEAD(w, r, filename)
-
totalSize := int64(filer2.TotalSize(entry.Chunks))
if rangeReq := r.Header.Get("Range"); rangeReq == "" {
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index d508849bc..89b7445e9 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -244,13 +244,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
}
w.Header().Set("Accept-Ranges", "bytes")
+ adjustHeaderContentDisposition(w, r, filename)
+
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
return nil
}
- adjustHeadersAfterHEAD(w, r, filename)
-
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
if _, e = rs.Seek(offset, 0); e != nil {
return e
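
Note: in both read paths the Content-Disposition adjustment now runs before the HEAD early-return, so HEAD responses advertise the same filename/disposition as GET; the rename to adjustHeaderContentDisposition reflects that it is no longer HEAD-specific. The resulting ordering, compressed into a sketch with the names from the surrounding code:

// sketch: headers shared by GET and HEAD are set before the early return
w.Header().Set("Accept-Ranges", "bytes")
adjustHeaderContentDisposition(w, r, filename) // inline vs. attachment, per the "dl" form value
if r.Method == "HEAD" {
	w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
	return nil
}
// GET continues with range handling and body streaming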